GB_unop__minv_int8_int8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__minv_int8_int8)
// op(A') function:  GB (_unop_tran__minv_int8_int8)

// C type:   int8_t
// A type:   int8_t
// cast:     int8_t cij = aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 8)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 8) ;

// casting
#define GB_CAST(z, aij) \
    int8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                  \
{                                          \
    /* aij = Ax [pA] */                    \
    int8_t aij = Ax [pA] ;                 \
    /* Cx [pC] = op (cast (aij)) */        \
    int8_t z = aij ;                       \
    Cx [pC] = GB_IMINV_SIGNED (z, 8) ;     \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__minv_int8_int8)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 8) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__minv_int8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
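For reference, the whole job of this generated kernel is the scalar operation GB_IMINV_SIGNED (aij, 8): the multiplicative inverse 1/aij in int8_t arithmetic. Below is a minimal stand-alone sketch of that scalar operation; the int8_minv name is hypothetical, and the zero case assumes GraphBLAS's convention of saturating 1/0 to the largest representable value rather than quoting the library's actual division-by-zero rule.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for GB_IMINV_SIGNED (x, 8): z = 1/x in int8_t.
   Integer division truncates, so the result is 1, -1, or 0, except at
   x == 0, where the saturating value below is an assumed convention. */
static inline int8_t int8_minv (int8_t x)
{
    if (x == 0) return INT8_MAX ;   /* assumed 1/0 convention */
    return (int8_t) (1 / x) ;       /* 1 -> 1, -1 -> -1, |x| > 1 -> 0 */
}

int main (void)
{
    printf ("%d %d %d %d\n", int8_minv (1), int8_minv (-1),
            int8_minv (5), int8_minv (0)) ;   /* prints: 1 -1 0 127 */
    return 0 ;
}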
cross.h
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author raver119@gmail.com
//

#include <ops/declarable/helpers/helpers.h>

namespace nd4j {
namespace ops {
namespace helpers {

    // Cross product of two 3-element vectors A and B, written into O.
    // Real-valued arrays are computed in double; everything else in Nd4jLong.
    void FORCEINLINE _cross(NDArray *a, NDArray *b, NDArray *o) {
        if (a->isR()) {
            auto a0 = a->e<double>(0);
            auto a1 = a->e<double>(1);
            auto a2 = a->e<double>(2);

            auto b0 = b->e<double>(0);
            auto b1 = b->e<double>(1);
            auto b2 = b->e<double>(2);

            o->p(Nd4jLong(0L), a1 * b2 - a2 * b1);
            o->p(1L, a2 * b0 - a0 * b2);
            o->p(2L, a0 * b1 - a1 * b0);
        } else {
            auto a0 = a->e<Nd4jLong>(0);
            auto a1 = a->e<Nd4jLong>(1);
            auto a2 = a->e<Nd4jLong>(2);

            auto b0 = b->e<Nd4jLong>(0);
            auto b1 = b->e<Nd4jLong>(1);
            auto b2 = b->e<Nd4jLong>(2);

            o->p(Nd4jLong(0L), a1 * b2 - a2 * b1);
            o->p(1L, a2 * b0 - a0 * b2);
            o->p(2L, a0 * b1 - a1 * b0);
        }
    }

    // Batched variant: view the inputs as [-1, 3] and apply _cross to each
    // row (tensor along dimension 1) in parallel.
    void FORCEINLINE _crossBatched(NDArray *a, NDArray *b, NDArray *o) {
        auto _a = a->reshape(a->ordering(), {-1, 3});
        auto _b = b->reshape(b->ordering(), {-1, 3});
        auto _o = o->reshape(o->ordering(), {-1, 3});

        auto tadsA = _a->allTensorsAlongDimension({1});
        auto tadsB = _b->allTensorsAlongDimension({1});
        auto tadsO = _o->allTensorsAlongDimension({1});

        int tads = tadsA->size();

        #pragma omp parallel for simd schedule(static)
        for (int e = 0; e < tads; e++) {
            auto a_ = tadsA->at(e);
            auto b_ = tadsB->at(e);
            auto o_ = tadsO->at(e);

            helpers::_cross(a_, b_, o_);
        }

        delete tadsA;
        delete tadsB;
        delete tadsO;

        delete _a;
        delete _b;
        delete _o;
    }

    void weightedCrossEntropyWithLogitsFunctor(NDArray const* targets, NDArray const* input, NDArray const* weights, NDArray* output);
}
}
}
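The arithmetic in both branches of _cross is the standard 3-vector cross product, cross(a, b) = (a1*b2 - a2*b1, a2*b0 - a0*b2, a0*b1 - a1*b0). A minimal stand-alone restatement in plain C follows; the cross3 helper and its double arrays are illustrative stand-ins for the NDArray accesses, not part of the nd4j API.

#include <stdio.h>

/* cross3: o = a x b for 3-element vectors, evaluating the same three
   expressions as _cross, with plain arrays in place of NDArrays. */
static void cross3 (const double a[3], const double b[3], double o[3])
{
    o[0] = a[1] * b[2] - a[2] * b[1];
    o[1] = a[2] * b[0] - a[0] * b[2];
    o[2] = a[0] * b[1] - a[1] * b[0];
}

int main (void)
{
    const double x[3] = {1, 0, 0}, y[3] = {0, 1, 0};
    double z[3];
    cross3 (x, y, z);                        /* expect (0, 0, 1) */
    printf ("%g %g %g\n", z[0], z[1], z[2]);
    return 0;
}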
c-typeck.c
/* Build expressions with type checking for C compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file is part of the C front end.
   It contains routines to build C expressions given their operands,
   including computing the types of the result, C-specific error checks,
   and some optimization.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "target.h"
#include "function.h"
#include "bitmap.h"
#include "c-tree.h"
#include "gimple-expr.h"
#include "predict.h"
#include "stor-layout.h"
#include "trans-mem.h"
#include "varasm.h"
#include "stmt.h"
#include "langhooks.h"
#include "c-lang.h"
#include "intl.h"
#include "tree-iterator.h"
#include "gimplify.h"
#include "tree-inline.h"
#include "omp-general.h"
#include "c-family/c-objc.h"
#include "c-family/c-ubsan.h"
#include "gomp-constants.h"
#include "spellcheck-tree.h"
#include "gcc-rich-location.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"

/* Possible cases of implicit bad conversions.  Used to select
   diagnostic messages in convert_for_assignment.  */
enum impl_conv {
  ic_argpass,
  ic_assign,
  ic_init,
  ic_return
};

/* The level of nesting inside "__alignof__".  */
int in_alignof;

/* The level of nesting inside "sizeof".  */
int in_sizeof;

/* The level of nesting inside "typeof".  */
int in_typeof;

/* The argument of last parsed sizeof expression, only to be tested
   if expr.original_code == SIZEOF_EXPR.  */
tree c_last_sizeof_arg;
location_t c_last_sizeof_loc;

/* Nonzero if we might need to print a "missing braces around
   initializer" message within this initializer.  */
static int found_missing_braces;

static int require_constant_value;
static int require_constant_elements;

static bool null_pointer_constant_p (const_tree);
static tree qualify_type (tree, tree);
static int tagged_types_tu_compatible_p (const_tree, const_tree, bool *,
                                         bool *);
static int comp_target_types (location_t, tree, tree);
static int function_types_compatible_p (const_tree, const_tree, bool *,
                                        bool *);
static int type_lists_compatible_p (const_tree, const_tree, bool *, bool *);
static tree lookup_field (tree, tree);
static int convert_arguments (location_t, vec<location_t>, tree,
                              vec<tree, va_gc> *, vec<tree, va_gc> *, tree,
                              tree);
static tree pointer_diff (location_t, tree, tree, tree *);
static tree convert_for_assignment (location_t, location_t, tree, tree, tree,
                                    enum impl_conv, bool, tree, tree, int);
static tree valid_compound_expr_initializer (tree, tree);
static void push_string (const char *);
static void push_member_name (tree);
static int spelling_length (void);
static char *print_spelling (char *);
static void warning_init (location_t, int, const char *);
static tree digest_init (location_t, tree, tree, tree, bool, bool, int);
static void output_init_element (location_t, tree, tree, bool, tree, tree,
                                 bool, bool, struct obstack *);
static void output_pending_init_elements (int, struct obstack *);
static bool set_designator (location_t, bool, struct obstack *);
static void push_range_stack (tree, struct obstack *);
static void add_pending_init (location_t, tree, tree, tree, bool,
                              struct obstack *);
static void set_nonincremental_init (struct obstack *);
static void set_nonincremental_init_from_string (tree, struct obstack *);
static tree find_init_member (tree, struct obstack *);
static void readonly_warning (tree, enum lvalue_use);
static int lvalue_or_else (location_t, const_tree, enum lvalue_use);
static void record_maybe_used_decl (tree);
static int comptypes_internal (const_tree, const_tree, bool *, bool *);

/* Return true if EXP is a null pointer constant, false otherwise.  */

static bool
null_pointer_constant_p (const_tree expr)
{
  /* This should really operate on c_expr structures, but they aren't
     yet available everywhere required.  */
  tree type = TREE_TYPE (expr);
  return (TREE_CODE (expr) == INTEGER_CST
          && !TREE_OVERFLOW (expr)
          && integer_zerop (expr)
          && (INTEGRAL_TYPE_P (type)
              || (TREE_CODE (type) == POINTER_TYPE
                  && VOID_TYPE_P (TREE_TYPE (type))
                  && TYPE_QUALS (TREE_TYPE (type)) == TYPE_UNQUALIFIED)));
}

/* EXPR may appear in an unevaluated part of an integer constant
   expression, but not in an evaluated part.  Wrap it in a
   C_MAYBE_CONST_EXPR, or mark it with TREE_OVERFLOW if it is just an
   INTEGER_CST and we cannot create a C_MAYBE_CONST_EXPR.  */

static tree
note_integer_operands (tree expr)
{
  tree ret;
  if (TREE_CODE (expr) == INTEGER_CST && in_late_binary_op)
    {
      ret = copy_node (expr);
      TREE_OVERFLOW (ret) = 1;
    }
  else
    {
      ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (expr), NULL_TREE, expr);
      C_MAYBE_CONST_EXPR_INT_OPERANDS (ret) = 1;
    }
  return ret;
}

/* Having checked whether EXPR may appear in an unevaluated part of an
   integer constant expression and found that it may, remove any
   C_MAYBE_CONST_EXPR noting this fact and return the resulting
   expression.  */

static inline tree
remove_c_maybe_const_expr (tree expr)
{
  if (TREE_CODE (expr) == C_MAYBE_CONST_EXPR)
    return C_MAYBE_CONST_EXPR_EXPR (expr);
  else
    return expr;
}

/* This is a cache to hold if two types are compatible or not.  */
struct tagged_tu_seen_cache {
  const struct tagged_tu_seen_cache * next;
  const_tree t1;
  const_tree t2;
  /* The return value of tagged_types_tu_compatible_p if we had seen
     these two types already.  */
  int val;
};

static const struct tagged_tu_seen_cache * tagged_tu_seen_base;
static void free_all_tagged_tu_seen_up_to (const struct tagged_tu_seen_cache *);

/* Do `exp = require_complete_type (loc, exp);' to make sure exp
   does not have an incomplete type.  (That includes void types.)
   LOC is the location of the use.  */

tree
require_complete_type (location_t loc, tree value)
{
  tree type = TREE_TYPE (value);

  if (error_operand_p (value))
    return error_mark_node;

  /* First, detect a valid value with a complete type.  */
  if (COMPLETE_TYPE_P (type))
    return value;

  c_incomplete_type_error (loc, value, type);
  return error_mark_node;
}

/* Print an error message for invalid use of an incomplete type.
   VALUE is the expression that was used (or 0 if that isn't known)
   and TYPE is the type that was invalid.  LOC is the location for
   the error.  */

void
c_incomplete_type_error (location_t loc, const_tree value, const_tree type)
{
  /* Avoid duplicate error message.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return;

  if (value != NULL_TREE && (VAR_P (value) || TREE_CODE (value) == PARM_DECL))
    error_at (loc, "%qD has an incomplete type %qT", value, type);
  else
    {
    retry:
      /* We must print an error message.  Be clever about what it says.  */

      switch (TREE_CODE (type))
        {
        case RECORD_TYPE:
        case UNION_TYPE:
        case ENUMERAL_TYPE:
          break;

        case VOID_TYPE:
          error_at (loc, "invalid use of void expression");
          return;

        case ARRAY_TYPE:
          if (TYPE_DOMAIN (type))
            {
              if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL)
                {
                  error_at (loc, "invalid use of flexible array member");
                  return;
                }
              type = TREE_TYPE (type);
              goto retry;
            }
          error_at (loc, "invalid use of array with unspecified bounds");
          return;

        default:
          gcc_unreachable ();
        }

      if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
        error_at (loc, "invalid use of undefined type %qT", type);
      else
        /* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL.  */
        error_at (loc, "invalid use of incomplete typedef %qT", type);
    }
}

/* Given a type, apply default promotions wrt unnamed function
   arguments and return the new type.  */

tree
c_type_promotes_to (tree type)
{
  tree ret = NULL_TREE;

  if (TYPE_MAIN_VARIANT (type) == float_type_node)
    ret = double_type_node;
  else if (c_promoting_integer_type_p (type))
    {
      /* Preserve unsignedness if not really getting any wider.  */
      if (TYPE_UNSIGNED (type)
          && (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)))
        ret = unsigned_type_node;
      else
        ret = integer_type_node;
    }

  if (ret != NULL_TREE)
    return (TYPE_ATOMIC (type)
            ? c_build_qualified_type (ret, TYPE_QUAL_ATOMIC)
            : ret);

  return type;
}

/* Return true if between two named address spaces, whether there is a superset
   named address space that encompasses both address spaces.  If there is a
   superset, return which address space is the superset.  */

static bool
addr_space_superset (addr_space_t as1, addr_space_t as2, addr_space_t *common)
{
  if (as1 == as2)
    {
      *common = as1;
      return true;
    }
  else if (targetm.addr_space.subset_p (as1, as2))
    {
      *common = as2;
      return true;
    }
  else if (targetm.addr_space.subset_p (as2, as1))
    {
      *common = as1;
      return true;
    }
  else
    return false;
}

/* Return a variant of TYPE which has all the type qualifiers of LIKE
   as well as those of TYPE.  */

static tree
qualify_type (tree type, tree like)
{
  addr_space_t as_type = TYPE_ADDR_SPACE (type);
  addr_space_t as_like = TYPE_ADDR_SPACE (like);
  addr_space_t as_common;

  /* If the two named address spaces are different, determine the common
     superset address space.  If there isn't one, raise an error.  */
  if (!addr_space_superset (as_type, as_like, &as_common))
    {
      as_common = as_type;
      error ("%qT and %qT are in disjoint named address spaces",
             type, like);
    }

  return c_build_qualified_type (type,
                                 TYPE_QUALS_NO_ADDR_SPACE (type)
                                 | TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (like)
                                 | ENCODE_QUAL_ADDR_SPACE (as_common));
}

/* Return true iff the given tree T is a variable length array.  */

bool
c_vla_type_p (const_tree t)
{
  if (TREE_CODE (t) == ARRAY_TYPE
      && C_TYPE_VARIABLE_SIZE (t))
    return true;
  return false;
}

/* Return the composite type of two compatible types.

   We assume that comptypes has already been done and returned
   nonzero; if that isn't so, this may crash.  In particular, we
   assume that qualifiers match.  */

tree
composite_type (tree t1, tree t2)
{
  enum tree_code code1;
  enum tree_code code2;
  tree attributes;

  /* Save time if the two types are the same.  */

  if (t1 == t2)
    return t1;

  /* If one type is nonsense, use the other.  */
  if (t1 == error_mark_node)
    return t2;
  if (t2 == error_mark_node)
    return t1;

  code1 = TREE_CODE (t1);
  code2 = TREE_CODE (t2);

  /* Merge the attributes.  */
  attributes = targetm.merge_type_attributes (t1, t2);

  /* If one is an enumerated type and the other is the compatible
     integer type, the composite type might be either of the two
     (DR#013 question 3).  For consistency, use the enumerated type as
     the composite type.  */

  if (code1 == ENUMERAL_TYPE && code2 == INTEGER_TYPE)
    return t1;
  if (code2 == ENUMERAL_TYPE && code1 == INTEGER_TYPE)
    return t2;

  gcc_assert (code1 == code2);

  switch (code1)
    {
    case POINTER_TYPE:
      /* For two pointers, do this recursively on the target type.  */
      {
        tree pointed_to_1 = TREE_TYPE (t1);
        tree pointed_to_2 = TREE_TYPE (t2);
        tree target = composite_type (pointed_to_1, pointed_to_2);
        t1 = build_pointer_type_for_mode (target, TYPE_MODE (t1), false);
        t1 = build_type_attribute_variant (t1, attributes);
        return qualify_type (t1, t2);
      }

    case ARRAY_TYPE:
      {
        tree elt = composite_type (TREE_TYPE (t1), TREE_TYPE (t2));
        int quals;
        tree unqual_elt;
        tree d1 = TYPE_DOMAIN (t1);
        tree d2 = TYPE_DOMAIN (t2);
        bool d1_variable, d2_variable;
        bool d1_zero, d2_zero;
        bool t1_complete, t2_complete;

        /* We should not have any type quals on arrays at all.  */
        gcc_assert (!TYPE_QUALS_NO_ADDR_SPACE (t1)
                    && !TYPE_QUALS_NO_ADDR_SPACE (t2));

        t1_complete = COMPLETE_TYPE_P (t1);
        t2_complete = COMPLETE_TYPE_P (t2);

        d1_zero = d1 == NULL_TREE || !TYPE_MAX_VALUE (d1);
        d2_zero = d2 == NULL_TREE || !TYPE_MAX_VALUE (d2);

        d1_variable = (!d1_zero
                       && (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
                           || TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST));
        d2_variable = (!d2_zero
                       && (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
                           || TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST));
        d1_variable = d1_variable || (d1_zero && c_vla_type_p (t1));
        d2_variable = d2_variable || (d2_zero && c_vla_type_p (t2));

        /* Save space: see if the result is identical to one of the args.  */
        if (elt == TREE_TYPE (t1) && TYPE_DOMAIN (t1)
            && (d2_variable || d2_zero || !d1_variable))
          return build_type_attribute_variant (t1, attributes);
        if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2)
            && (d1_variable || d1_zero || !d2_variable))
          return build_type_attribute_variant (t2, attributes);

        if (elt == TREE_TYPE (t1) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
          return build_type_attribute_variant (t1, attributes);
        if (elt == TREE_TYPE (t2) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
          return build_type_attribute_variant (t2, attributes);

        /* Merge the element types, and have a size if either arg has
           one.  We may have qualifiers on the element types.  To set
           up TYPE_MAIN_VARIANT correctly, we need to form the
           composite of the unqualified types and add the qualifiers
           back at the end.  */
        quals = TYPE_QUALS (strip_array_types (elt));
        unqual_elt = c_build_qualified_type (elt, TYPE_UNQUALIFIED);
        t1 = build_array_type (unqual_elt,
                               TYPE_DOMAIN ((TYPE_DOMAIN (t1)
                                             && (d2_variable
                                                 || d2_zero
                                                 || !d1_variable))
                                            ? t1
                                            : t2));
        /* Ensure a composite type involving a zero-length array type
           is a zero-length type not an incomplete type.  */
        if (d1_zero && d2_zero
            && (t1_complete || t2_complete)
            && !COMPLETE_TYPE_P (t1))
          {
            TYPE_SIZE (t1) = bitsize_zero_node;
            TYPE_SIZE_UNIT (t1) = size_zero_node;
          }
        t1 = c_build_qualified_type (t1, quals);
        return build_type_attribute_variant (t1, attributes);
      }

    case ENUMERAL_TYPE:
    case RECORD_TYPE:
    case UNION_TYPE:
      if (attributes != NULL)
        {
          /* Try harder not to create a new aggregate type.  */
          if (attribute_list_equal (TYPE_ATTRIBUTES (t1), attributes))
            return t1;
          if (attribute_list_equal (TYPE_ATTRIBUTES (t2), attributes))
            return t2;
        }
      return build_type_attribute_variant (t1, attributes);

    case FUNCTION_TYPE:
      /* Function types: prefer the one that specified arg types.
         If both do, merge the arg types.  Also merge the return types.  */
      {
        tree valtype = composite_type (TREE_TYPE (t1), TREE_TYPE (t2));
        tree p1 = TYPE_ARG_TYPES (t1);
        tree p2 = TYPE_ARG_TYPES (t2);
        int len;
        tree newargs, n;
        int i;

        /* Save space: see if the result is identical to one of the args.  */
        if (valtype == TREE_TYPE (t1) && !TYPE_ARG_TYPES (t2))
          return build_type_attribute_variant (t1, attributes);
        if (valtype == TREE_TYPE (t2) && !TYPE_ARG_TYPES (t1))
          return build_type_attribute_variant (t2, attributes);

        /* Simple way if one arg fails to specify argument types.  */
        if (TYPE_ARG_TYPES (t1) == NULL_TREE)
          {
            t1 = build_function_type (valtype, TYPE_ARG_TYPES (t2));
            t1 = build_type_attribute_variant (t1, attributes);
            return qualify_type (t1, t2);
          }
        if (TYPE_ARG_TYPES (t2) == NULL_TREE)
          {
            t1 = build_function_type (valtype, TYPE_ARG_TYPES (t1));
            t1 = build_type_attribute_variant (t1, attributes);
            return qualify_type (t1, t2);
          }

        /* If both args specify argument types, we must merge the two
           lists, argument by argument.  */

        for (len = 0, newargs = p1;
             newargs && newargs != void_list_node;
             len++, newargs = TREE_CHAIN (newargs))
          ;

        for (i = 0; i < len; i++)
          newargs = tree_cons (NULL_TREE, NULL_TREE, newargs);

        n = newargs;

        for (; p1 && p1 != void_list_node;
             p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2), n = TREE_CHAIN (n))
          {
            /* A null type means arg type is not specified.
               Take whatever the other function type has.  */
            if (TREE_VALUE (p1) == NULL_TREE)
              {
                TREE_VALUE (n) = TREE_VALUE (p2);
                goto parm_done;
              }
            if (TREE_VALUE (p2) == NULL_TREE)
              {
                TREE_VALUE (n) = TREE_VALUE (p1);
                goto parm_done;
              }

            /* Given  wait (union {union wait *u; int *i} *)
               and  wait (union wait *),
               prefer  union wait *  as type of parm.  */
            if (TREE_CODE (TREE_VALUE (p1)) == UNION_TYPE
                && TREE_VALUE (p1) != TREE_VALUE (p2))
              {
                tree memb;
                tree mv2 = TREE_VALUE (p2);
                if (mv2 && mv2 != error_mark_node
                    && TREE_CODE (mv2) != ARRAY_TYPE)
                  mv2 = TYPE_MAIN_VARIANT (mv2);
                for (memb = TYPE_FIELDS (TREE_VALUE (p1));
                     memb; memb = DECL_CHAIN (memb))
                  {
                    tree mv3 = TREE_TYPE (memb);
                    if (mv3 && mv3 != error_mark_node
                        && TREE_CODE (mv3) != ARRAY_TYPE)
                      mv3 = TYPE_MAIN_VARIANT (mv3);
                    if (comptypes (mv3, mv2))
                      {
                        TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
                                                         TREE_VALUE (p2));
                        pedwarn (input_location, OPT_Wpedantic,
                                 "function types not truly compatible in ISO C");
                        goto parm_done;
                      }
                  }
              }
            if (TREE_CODE (TREE_VALUE (p2)) == UNION_TYPE
                && TREE_VALUE (p2) != TREE_VALUE (p1))
              {
                tree memb;
                tree mv1 = TREE_VALUE (p1);
                if (mv1 && mv1 != error_mark_node
                    && TREE_CODE (mv1) != ARRAY_TYPE)
                  mv1 = TYPE_MAIN_VARIANT (mv1);
                for (memb = TYPE_FIELDS (TREE_VALUE (p2));
                     memb; memb = DECL_CHAIN (memb))
                  {
                    tree mv3 = TREE_TYPE (memb);
                    if (mv3 && mv3 != error_mark_node
                        && TREE_CODE (mv3) != ARRAY_TYPE)
                      mv3 = TYPE_MAIN_VARIANT (mv3);
                    if (comptypes (mv3, mv1))
                      {
                        TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
                                                         TREE_VALUE (p1));
                        pedwarn (input_location, OPT_Wpedantic,
                                 "function types not truly compatible in ISO C");
                        goto parm_done;
                      }
                  }
              }
            TREE_VALUE (n) = composite_type (TREE_VALUE (p1), TREE_VALUE (p2));
          parm_done: ;
          }

        t1 = build_function_type (valtype, newargs);
        t1 = qualify_type (t1, t2);
      }
      /* FALLTHRU */

    default:
      return build_type_attribute_variant (t1, attributes);
    }
}

/* Return the type of a conditional expression between pointers to
   possibly differently qualified versions of compatible types.

   We assume that comp_target_types has already been done and returned
   nonzero; if that isn't so, this may crash.  */

static tree
common_pointer_type (tree t1, tree t2)
{
  tree attributes;
  tree pointed_to_1, mv1;
  tree pointed_to_2, mv2;
  tree target;
  unsigned target_quals;
  addr_space_t as1, as2, as_common;
  int quals1, quals2;

  /* Save time if the two types are the same.  */

  if (t1 == t2)
    return t1;

  /* If one type is nonsense, use the other.  */
  if (t1 == error_mark_node)
    return t2;
  if (t2 == error_mark_node)
    return t1;

  gcc_assert (TREE_CODE (t1) == POINTER_TYPE
              && TREE_CODE (t2) == POINTER_TYPE);

  /* Merge the attributes.  */
  attributes = targetm.merge_type_attributes (t1, t2);

  /* Find the composite type of the target types, and combine the
     qualifiers of the two types' targets.  Do not lose qualifiers on
     array element types by taking the TYPE_MAIN_VARIANT.  */
  mv1 = pointed_to_1 = TREE_TYPE (t1);
  mv2 = pointed_to_2 = TREE_TYPE (t2);
  if (TREE_CODE (mv1) != ARRAY_TYPE)
    mv1 = TYPE_MAIN_VARIANT (pointed_to_1);
  if (TREE_CODE (mv2) != ARRAY_TYPE)
    mv2 = TYPE_MAIN_VARIANT (pointed_to_2);
  target = composite_type (mv1, mv2);

  /* Strip array types to get correct qualifier for pointers to arrays */
  quals1 = TYPE_QUALS_NO_ADDR_SPACE (strip_array_types (pointed_to_1));
  quals2 = TYPE_QUALS_NO_ADDR_SPACE (strip_array_types (pointed_to_2));

  /* For function types do not merge const qualifiers, but drop them
     if used inconsistently.  The middle-end uses these to mark const
     and noreturn functions.  */
  if (TREE_CODE (pointed_to_1) == FUNCTION_TYPE)
    target_quals = (quals1 & quals2);
  else
    target_quals = (quals1 | quals2);

  /* If the two named address spaces are different, determine the common
     superset address space.  This is guaranteed to exist due to the
     assumption that comp_target_type returned non-zero.  */
  as1 = TYPE_ADDR_SPACE (pointed_to_1);
  as2 = TYPE_ADDR_SPACE (pointed_to_2);
  if (!addr_space_superset (as1, as2, &as_common))
    gcc_unreachable ();

  target_quals |= ENCODE_QUAL_ADDR_SPACE (as_common);

  t1 = build_pointer_type (c_build_qualified_type (target, target_quals));
  return build_type_attribute_variant (t1, attributes);
}

/* Return the common type for two arithmetic types under the usual
   arithmetic conversions.  The default conversions have already been
   applied, and enumerated types converted to their compatible integer
   types.  The resulting type is unqualified and has no attributes.

   This is the type for the result of most arithmetic operations
   if the operands have the given two types.  */

static tree
c_common_type (tree t1, tree t2)
{
  enum tree_code code1;
  enum tree_code code2;

  /* If one type is nonsense, use the other.  */
  if (t1 == error_mark_node)
    return t2;
  if (t2 == error_mark_node)
    return t1;

  if (TYPE_QUALS (t1) != TYPE_UNQUALIFIED)
    t1 = TYPE_MAIN_VARIANT (t1);

  if (TYPE_QUALS (t2) != TYPE_UNQUALIFIED)
    t2 = TYPE_MAIN_VARIANT (t2);

  if (TYPE_ATTRIBUTES (t1) != NULL_TREE)
    t1 = build_type_attribute_variant (t1, NULL_TREE);

  if (TYPE_ATTRIBUTES (t2) != NULL_TREE)
    t2 = build_type_attribute_variant (t2, NULL_TREE);

  /* Save time if the two types are the same.  */

  if (t1 == t2)
    return t1;

  code1 = TREE_CODE (t1);
  code2 = TREE_CODE (t2);

  gcc_assert (code1 == VECTOR_TYPE || code1 == COMPLEX_TYPE
              || code1 == FIXED_POINT_TYPE || code1 == REAL_TYPE
              || code1 == INTEGER_TYPE);
  gcc_assert (code2 == VECTOR_TYPE || code2 == COMPLEX_TYPE
              || code2 == FIXED_POINT_TYPE || code2 == REAL_TYPE
              || code2 == INTEGER_TYPE);

  /* When one operand is a decimal float type, the other operand cannot be
     a generic float type or a complex type.  We also disallow vector types
     here.  */
  if ((DECIMAL_FLOAT_TYPE_P (t1) || DECIMAL_FLOAT_TYPE_P (t2))
      && !(DECIMAL_FLOAT_TYPE_P (t1) && DECIMAL_FLOAT_TYPE_P (t2)))
    {
      if (code1 == VECTOR_TYPE || code2 == VECTOR_TYPE)
        {
          error ("can%'t mix operands of decimal float and vector types");
          return error_mark_node;
        }
      if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
        {
          error ("can%'t mix operands of decimal float and complex types");
          return error_mark_node;
        }
      if (code1 == REAL_TYPE && code2 == REAL_TYPE)
        {
          error ("can%'t mix operands of decimal float and other float types");
          return error_mark_node;
        }
    }

  /* If one type is a vector type, return that type.  (How the usual
     arithmetic conversions apply to the vector types extension is not
     precisely specified.)  */
  if (code1 == VECTOR_TYPE)
    return t1;

  if (code2 == VECTOR_TYPE)
    return t2;

  /* If one type is complex, form the common type of the non-complex
     components, then make that complex.  Use T1 or T2 if it is the
     required type.  */
  if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
    {
      tree subtype1 = code1 == COMPLEX_TYPE ? TREE_TYPE (t1) : t1;
      tree subtype2 = code2 == COMPLEX_TYPE ? TREE_TYPE (t2) : t2;
      tree subtype = c_common_type (subtype1, subtype2);

      if (code1 == COMPLEX_TYPE && TREE_TYPE (t1) == subtype)
        return t1;
      else if (code2 == COMPLEX_TYPE && TREE_TYPE (t2) == subtype)
        return t2;
      else
        return build_complex_type (subtype);
    }

  /* If only one is real, use it as the result.  */

  if (code1 == REAL_TYPE && code2 != REAL_TYPE)
    return t1;

  if (code2 == REAL_TYPE && code1 != REAL_TYPE)
    return t2;

  /* If both are real and either are decimal floating point types, use
     the decimal floating point type with the greater precision.  */

  if (code1 == REAL_TYPE && code2 == REAL_TYPE)
    {
      if (TYPE_MAIN_VARIANT (t1) == dfloat128_type_node
          || TYPE_MAIN_VARIANT (t2) == dfloat128_type_node)
        return dfloat128_type_node;
      else if (TYPE_MAIN_VARIANT (t1) == dfloat64_type_node
               || TYPE_MAIN_VARIANT (t2) == dfloat64_type_node)
        return dfloat64_type_node;
      else if (TYPE_MAIN_VARIANT (t1) == dfloat32_type_node
               || TYPE_MAIN_VARIANT (t2) == dfloat32_type_node)
        return dfloat32_type_node;
    }

  /* Deal with fixed-point types.  */
  if (code1 == FIXED_POINT_TYPE || code2 == FIXED_POINT_TYPE)
    {
      unsigned int unsignedp = 0, satp = 0;
      scalar_mode m1, m2;
      unsigned int fbit1, ibit1, fbit2, ibit2, max_fbit, max_ibit;

      m1 = SCALAR_TYPE_MODE (t1);
      m2 = SCALAR_TYPE_MODE (t2);

      /* If one input type is saturating, the result type is saturating.  */
      if (TYPE_SATURATING (t1) || TYPE_SATURATING (t2))
        satp = 1;

      /* If both fixed-point types are unsigned, the result type is
         unsigned.  When mixing fixed-point and integer types, follow
         the sign of the fixed-point type.  Otherwise, the result type
         is signed.  */
      if ((TYPE_UNSIGNED (t1) && TYPE_UNSIGNED (t2)
           && code1 == FIXED_POINT_TYPE && code2 == FIXED_POINT_TYPE)
          || (code1 == FIXED_POINT_TYPE && code2 != FIXED_POINT_TYPE
              && TYPE_UNSIGNED (t1))
          || (code1 != FIXED_POINT_TYPE && code2 == FIXED_POINT_TYPE
              && TYPE_UNSIGNED (t2)))
        unsignedp = 1;

      /* The result type is signed.  */
      if (unsignedp == 0)
        {
          /* If the input type is unsigned, we need to convert to the
             signed type.  */
          if (code1 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t1))
            {
              enum mode_class mclass = (enum mode_class) 0;
              if (GET_MODE_CLASS (m1) == MODE_UFRACT)
                mclass = MODE_FRACT;
              else if (GET_MODE_CLASS (m1) == MODE_UACCUM)
                mclass = MODE_ACCUM;
              else
                gcc_unreachable ();
              m1 = as_a <scalar_mode>
                (mode_for_size (GET_MODE_PRECISION (m1), mclass, 0));
            }
          if (code2 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t2))
            {
              enum mode_class mclass = (enum mode_class) 0;
              if (GET_MODE_CLASS (m2) == MODE_UFRACT)
                mclass = MODE_FRACT;
              else if (GET_MODE_CLASS (m2) == MODE_UACCUM)
                mclass = MODE_ACCUM;
              else
                gcc_unreachable ();
              m2 = as_a <scalar_mode>
                (mode_for_size (GET_MODE_PRECISION (m2), mclass, 0));
            }
        }

      if (code1 == FIXED_POINT_TYPE)
        {
          fbit1 = GET_MODE_FBIT (m1);
          ibit1 = GET_MODE_IBIT (m1);
        }
      else
        {
          fbit1 = 0;
          /* Signed integers need to subtract one sign bit.  */
          ibit1 = TYPE_PRECISION (t1) - (!TYPE_UNSIGNED (t1));
        }

      if (code2 == FIXED_POINT_TYPE)
        {
          fbit2 = GET_MODE_FBIT (m2);
          ibit2 = GET_MODE_IBIT (m2);
        }
      else
        {
          fbit2 = 0;
          /* Signed integers need to subtract one sign bit.  */
          ibit2 = TYPE_PRECISION (t2) - (!TYPE_UNSIGNED (t2));
        }

      max_ibit = ibit1 >= ibit2 ? ibit1 : ibit2;
      max_fbit = fbit1 >= fbit2 ? fbit1 : fbit2;
      return c_common_fixed_point_type_for_size (max_ibit, max_fbit,
                                                 unsignedp, satp);
    }

  /* Both real or both integers; use the one with greater precision.  */

  if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2))
    return t1;
  else if (TYPE_PRECISION (t2) > TYPE_PRECISION (t1))
    return t2;

  /* Same precision.  Prefer long longs to longs to ints when the
     same precision, following the C99 rules on integer type rank
     (which are equivalent to the C90 rules for C90 types).  */
  if (TYPE_MAIN_VARIANT (t1) == long_long_unsigned_type_node
      || TYPE_MAIN_VARIANT (t2) == long_long_unsigned_type_node)
    return long_long_unsigned_type_node;

  if (TYPE_MAIN_VARIANT (t1) == long_long_integer_type_node
      || TYPE_MAIN_VARIANT (t2) == long_long_integer_type_node)
    {
      if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
        return long_long_unsigned_type_node;
      else
        return long_long_integer_type_node;
    }

  if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node
      || TYPE_MAIN_VARIANT (t2) == long_unsigned_type_node)
    return long_unsigned_type_node;

  if (TYPE_MAIN_VARIANT (t1) == long_integer_type_node
      || TYPE_MAIN_VARIANT (t2) == long_integer_type_node)
    {
      /* But preserve unsignedness from the other type,
         since long cannot hold all the values of an unsigned int.  */
      if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
        return long_unsigned_type_node;
      else
        return long_integer_type_node;
    }

  /* For floating types of the same TYPE_PRECISION (which we here
     assume means either the same set of values, or sets of values
     neither a subset of the other, with behavior being undefined in
     the latter case), follow the rules from TS 18661-3: prefer
     interchange types _FloatN, then standard types long double,
     double, float, then extended types _FloatNx.  For extended types,
     check them starting with _Float128x as that seems most consistent
     in spirit with preferring long double to double; for interchange
     types, also check in that order for consistency although it's not
     possible for more than one of them to have the same
     precision.  */
  tree mv1 = TYPE_MAIN_VARIANT (t1);
  tree mv2 = TYPE_MAIN_VARIANT (t2);

  for (int i = NUM_FLOATN_TYPES - 1; i >= 0; i--)
    if (mv1 == FLOATN_TYPE_NODE (i) || mv2 == FLOATN_TYPE_NODE (i))
      return FLOATN_TYPE_NODE (i);

  /* Likewise, prefer long double to double even if same size.  */
  if (mv1 == long_double_type_node || mv2 == long_double_type_node)
    return long_double_type_node;

  /* Likewise, prefer double to float even if same size.
     We got a couple of embedded targets with 32 bit doubles, and the
     pdp11 might have 64 bit floats.  */
  if (mv1 == double_type_node || mv2 == double_type_node)
    return double_type_node;

  if (mv1 == float_type_node || mv2 == float_type_node)
    return float_type_node;

  for (int i = NUM_FLOATNX_TYPES - 1; i >= 0; i--)
    if (mv1 == FLOATNX_TYPE_NODE (i) || mv2 == FLOATNX_TYPE_NODE (i))
      return FLOATNX_TYPE_NODE (i);

  /* Otherwise prefer the unsigned one.  */

  if (TYPE_UNSIGNED (t1))
    return t1;
  else
    return t2;
}

/* Wrapper around c_common_type that is used by c-common.c and other
   front end optimizations that remove promotions.  ENUMERAL_TYPEs
   are allowed here and are converted to their compatible integer types.
   BOOLEAN_TYPEs are allowed here and return either boolean_type_node or
   preferably a non-Boolean type as the common type.  */
tree
common_type (tree t1, tree t2)
{
  if (TREE_CODE (t1) == ENUMERAL_TYPE)
    t1 = c_common_type_for_size (TYPE_PRECISION (t1), 1);
  if (TREE_CODE (t2) == ENUMERAL_TYPE)
    t2 = c_common_type_for_size (TYPE_PRECISION (t2), 1);

  /* If both types are BOOLEAN_TYPE, then return boolean_type_node.  */
  if (TREE_CODE (t1) == BOOLEAN_TYPE
      && TREE_CODE (t2) == BOOLEAN_TYPE)
    return boolean_type_node;

  /* If either type is BOOLEAN_TYPE, then return the other.  */
  if (TREE_CODE (t1) == BOOLEAN_TYPE)
    return t2;
  if (TREE_CODE (t2) == BOOLEAN_TYPE)
    return t1;

  return c_common_type (t1, t2);
}

/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
   or various other operations.  Return 2 if they are compatible
   but a warning may be needed if you use them together.  */

int
comptypes (tree type1, tree type2)
{
  const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
  int val;

  val = comptypes_internal (type1, type2, NULL, NULL);
  free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);

  return val;
}

/* Like comptypes, but if it returns non-zero because enum and int are
   compatible, it sets *ENUM_AND_INT_P to true.  */

static int
comptypes_check_enum_int (tree type1, tree type2, bool *enum_and_int_p)
{
  const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
  int val;

  val = comptypes_internal (type1, type2, enum_and_int_p, NULL);
  free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);

  return val;
}

/* Like comptypes, but if it returns nonzero for different types, it
   sets *DIFFERENT_TYPES_P to true.  */

int
comptypes_check_different_types (tree type1, tree type2,
                                 bool *different_types_p)
{
  const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
  int val;

  val = comptypes_internal (type1, type2, NULL, different_types_p);
  free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);

  return val;
}

/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
   or various other operations.  Return 2 if they are compatible
   but a warning may be needed if you use them together.  If
   ENUM_AND_INT_P is not NULL, and one type is an enum and the other a
   compatible integer type, then this sets *ENUM_AND_INT_P to true;
   *ENUM_AND_INT_P is never set to false.  If DIFFERENT_TYPES_P is not
   NULL, and the types are compatible but different enough not to be
   permitted in C11 typedef redeclarations, then this sets
   *DIFFERENT_TYPES_P to true; *DIFFERENT_TYPES_P is never set to
   false, but may or may not be set if the types are incompatible.
   This differs from comptypes, in that we don't free the seen
   types.  */

static int
comptypes_internal (const_tree type1, const_tree type2, bool *enum_and_int_p,
                    bool *different_types_p)
{
  const_tree t1 = type1;
  const_tree t2 = type2;
  int attrval, val;

  /* Suppress errors caused by previously reported errors.  */

  if (t1 == t2 || !t1 || !t2
      || TREE_CODE (t1) == ERROR_MARK || TREE_CODE (t2) == ERROR_MARK)
    return 1;

  /* Enumerated types are compatible with integer types, but this is
     not transitive: two enumerated types in the same translation unit
     are compatible with each other only if they are the same type.  */

  if (TREE_CODE (t1) == ENUMERAL_TYPE && TREE_CODE (t2) != ENUMERAL_TYPE)
    {
      t1 = c_common_type_for_size (TYPE_PRECISION (t1), TYPE_UNSIGNED (t1));
      if (TREE_CODE (t2) != VOID_TYPE)
        {
          if (enum_and_int_p != NULL)
            *enum_and_int_p = true;
          if (different_types_p != NULL)
            *different_types_p = true;
        }
    }
  else if (TREE_CODE (t2) == ENUMERAL_TYPE && TREE_CODE (t1) != ENUMERAL_TYPE)
    {
      t2 = c_common_type_for_size (TYPE_PRECISION (t2), TYPE_UNSIGNED (t2));
      if (TREE_CODE (t1) != VOID_TYPE)
        {
          if (enum_and_int_p != NULL)
            *enum_and_int_p = true;
          if (different_types_p != NULL)
            *different_types_p = true;
        }
    }

  if (t1 == t2)
    return 1;

  /* Different classes of types can't be compatible.  */

  if (TREE_CODE (t1) != TREE_CODE (t2))
    return 0;

  /* Qualifiers must match. C99 6.7.3p9 */

  if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
    return 0;

  /* Allow for two different type nodes which have essentially the same
     definition.  Note that we already checked for equality of the type
     qualifiers (just above).  */

  if (TREE_CODE (t1) != ARRAY_TYPE
      && TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
    return 1;

  /* 1 if no need for warning yet, 2 if warning cause has been seen.  */
  if (!(attrval = comp_type_attributes (t1, t2)))
     return 0;

  /* 1 if no need for warning yet, 2 if warning cause has been seen.  */
  val = 0;

  switch (TREE_CODE (t1))
    {
    case INTEGER_TYPE:
    case FIXED_POINT_TYPE:
    case REAL_TYPE:
      /* With these nodes, we can't determine type equivalence by
         looking at what is stored in the nodes themselves, because
         two nodes might have different TYPE_MAIN_VARIANTs but still
         represent the same type.  For example, wchar_t and int could
         have the same properties (TYPE_PRECISION, TYPE_MIN_VALUE,
         TYPE_MAX_VALUE, etc.), but have different TYPE_MAIN_VARIANTs
         and are distinct types.  On the other hand, int and the
         following typedef

           typedef int INT __attribute((may_alias));

         have identical properties, different TYPE_MAIN_VARIANTs, but
         represent the same type.  The canonical type system keeps
         track of equivalence in this case, so we fall back on it.  */
      return TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2);

    case POINTER_TYPE:
      /* Do not remove mode information.  */
      if (TYPE_MODE (t1) != TYPE_MODE (t2))
        break;
      val = (TREE_TYPE (t1) == TREE_TYPE (t2)
             ? 1 : comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
                                       enum_and_int_p, different_types_p));
      break;

    case FUNCTION_TYPE:
      val = function_types_compatible_p (t1, t2, enum_and_int_p,
                                         different_types_p);
      break;

    case ARRAY_TYPE:
      {
        tree d1 = TYPE_DOMAIN (t1);
        tree d2 = TYPE_DOMAIN (t2);
        bool d1_variable, d2_variable;
        bool d1_zero, d2_zero;
        val = 1;

        /* Target types must match incl. qualifiers.  */
        if (TREE_TYPE (t1) != TREE_TYPE (t2)
            && (val = comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
                                          enum_and_int_p,
                                          different_types_p)) == 0)
          return 0;

        if (different_types_p != NULL
            && (d1 == NULL_TREE) != (d2 == NULL_TREE))
          *different_types_p = true;
        /* Sizes must match unless one is missing or variable.  */
        if (d1 == NULL_TREE || d2 == NULL_TREE || d1 == d2)
          break;

        d1_zero = !TYPE_MAX_VALUE (d1);
        d2_zero = !TYPE_MAX_VALUE (d2);

        d1_variable = (!d1_zero
                       && (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
                           || TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST));
        d2_variable = (!d2_zero
                       && (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
                           || TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST));
        d1_variable = d1_variable || (d1_zero && c_vla_type_p (t1));
        d2_variable = d2_variable || (d2_zero && c_vla_type_p (t2));

        if (different_types_p != NULL
            && d1_variable != d2_variable)
          *different_types_p = true;
        if (d1_variable || d2_variable)
          break;
        if (d1_zero && d2_zero)
          break;
        if (d1_zero || d2_zero
            || !tree_int_cst_equal (TYPE_MIN_VALUE (d1), TYPE_MIN_VALUE (d2))
            || !tree_int_cst_equal (TYPE_MAX_VALUE (d1), TYPE_MAX_VALUE (d2)))
          val = 0;

        break;
      }

    case ENUMERAL_TYPE:
    case RECORD_TYPE:
    case UNION_TYPE:
      if (val != 1 && !same_translation_unit_p (t1, t2))
        {
          tree a1 = TYPE_ATTRIBUTES (t1);
          tree a2 = TYPE_ATTRIBUTES (t2);

          if (! attribute_list_contained (a1, a2)
              && ! attribute_list_contained (a2, a1))
            break;

          if (attrval != 2)
            return tagged_types_tu_compatible_p (t1, t2, enum_and_int_p,
                                                 different_types_p);
          val = tagged_types_tu_compatible_p (t1, t2, enum_and_int_p,
                                              different_types_p);
        }
      break;

    case VECTOR_TYPE:
      val = (known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
             && comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
                                    enum_and_int_p, different_types_p));
      break;

    default:
      break;
    }
  return attrval == 2 && val == 1 ? 2 : val;
}

/* Return 1 if TTL and TTR are pointers to types that are equivalent, ignoring
   their qualifiers, except for named address spaces.  If the pointers point to
   different named addresses, then we must determine if one address space is a
   subset of the other.  */

static int
comp_target_types (location_t location, tree ttl, tree ttr)
{
  int val;
  int val_ped;
  tree mvl = TREE_TYPE (ttl);
  tree mvr = TREE_TYPE (ttr);
  addr_space_t asl = TYPE_ADDR_SPACE (mvl);
  addr_space_t asr = TYPE_ADDR_SPACE (mvr);
  addr_space_t as_common;
  bool enum_and_int_p;

  /* Fail if pointers point to incompatible address spaces.  */
  if (!addr_space_superset (asl, asr, &as_common))
    return 0;

  /* For pedantic record result of comptypes on arrays before losing
     qualifiers on the element type below.  */
  val_ped = 1;

  if (TREE_CODE (mvl) == ARRAY_TYPE
      && TREE_CODE (mvr) == ARRAY_TYPE)
    val_ped = comptypes (mvl, mvr);

  /* Qualifiers on element types of array types that are
     pointer targets are lost by taking their TYPE_MAIN_VARIANT.  */

  mvl = (TYPE_ATOMIC (strip_array_types (mvl))
         ? c_build_qualified_type (TYPE_MAIN_VARIANT (mvl), TYPE_QUAL_ATOMIC)
         : TYPE_MAIN_VARIANT (mvl));

  mvr = (TYPE_ATOMIC (strip_array_types (mvr))
         ? c_build_qualified_type (TYPE_MAIN_VARIANT (mvr), TYPE_QUAL_ATOMIC)
         : TYPE_MAIN_VARIANT (mvr));

  enum_and_int_p = false;
  val = comptypes_check_enum_int (mvl, mvr, &enum_and_int_p);

  if (val == 1 && val_ped != 1)
    pedwarn (location, OPT_Wpedantic, "pointers to arrays with different qualifiers "
             "are incompatible in ISO C");

  if (val == 2)
    pedwarn (location, OPT_Wpedantic, "types are not quite compatible");

  if (val == 1 && enum_and_int_p && warn_cxx_compat)
    warning_at (location, OPT_Wc___compat,
                "pointer target types incompatible in C++");

  return val;
}

/* Subroutines of `comptypes'.  */

/* Determine whether two trees derive from the same translation unit.
   If the CONTEXT chain ends in a null, that tree's context is still
   being parsed, so if two trees have context chains ending in null,
   they're in the same translation unit.  */

bool
same_translation_unit_p (const_tree t1, const_tree t2)
{
  while (t1 && TREE_CODE (t1) != TRANSLATION_UNIT_DECL)
    switch (TREE_CODE_CLASS (TREE_CODE (t1)))
      {
      case tcc_declaration:
        t1 = DECL_CONTEXT (t1); break;
      case tcc_type:
        t1 = TYPE_CONTEXT (t1); break;
      case tcc_exceptional:
        t1 = BLOCK_SUPERCONTEXT (t1); break;  /* assume block */
      default: gcc_unreachable ();
      }

  while (t2 && TREE_CODE (t2) != TRANSLATION_UNIT_DECL)
    switch (TREE_CODE_CLASS (TREE_CODE (t2)))
      {
      case tcc_declaration:
        t2 = DECL_CONTEXT (t2); break;
      case tcc_type:
        t2 = TYPE_CONTEXT (t2); break;
      case tcc_exceptional:
        t2 = BLOCK_SUPERCONTEXT (t2); break;  /* assume block */
      default: gcc_unreachable ();
      }

  return t1 == t2;
}

/* Allocate the seen two types, assuming that they are compatible.  */

static struct tagged_tu_seen_cache *
alloc_tagged_tu_seen_cache (const_tree t1, const_tree t2)
{
  struct tagged_tu_seen_cache *tu = XNEW (struct tagged_tu_seen_cache);
  tu->next = tagged_tu_seen_base;
  tu->t1 = t1;
  tu->t2 = t2;

  tagged_tu_seen_base = tu;

  /* The C standard says that two structures in different translation
     units are compatible with each other only if the types of their
     fields are compatible (among other things).  We assume that they
     are compatible until proven otherwise when building the cache.
     An example where this can occur is:
     struct a
     {
       struct a *next;
     };
     If we are comparing this against a similar struct in another TU,
     and did not assume they were compatible, we end up with an infinite
     loop.  */
  tu->val = 1;
  return tu;
}

/* Free the seen types until we get to TU_TIL.  */
static void
free_all_tagged_tu_seen_up_to (const struct tagged_tu_seen_cache *tu_til)
{
  const struct tagged_tu_seen_cache *tu = tagged_tu_seen_base;
  while (tu != tu_til)
    {
      const struct tagged_tu_seen_cache *const tu1
        = (const struct tagged_tu_seen_cache *) tu;
      tu = tu1->next;
      free (CONST_CAST (struct tagged_tu_seen_cache *, tu1));
    }
  tagged_tu_seen_base = tu_til;
}

/* Return 1 if two 'struct', 'union', or 'enum' types T1 and T2 are
   compatible.  If the two types are not the same (which has been
   checked earlier), this can only happen when multiple translation
   units are being compiled.  See C99 6.2.7 paragraph 1 for the exact
   rules.  ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in
   comptypes_internal.  */

static int
tagged_types_tu_compatible_p (const_tree t1, const_tree t2,
                              bool *enum_and_int_p, bool *different_types_p)
{
  tree s1, s2;
  bool needs_warning = false;

  /* We have to verify that the tags of the types are the same.  This
     is harder than it looks because this may be a typedef, so we have
     to go look at the original type.  It may even be a typedef of a
     typedef...
     In the case of compiler-created builtin structs the TYPE_DECL
     may be a dummy, with no DECL_ORIGINAL_TYPE.  Don't fault.  */
  while (TYPE_NAME (t1)
         && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL
         && DECL_ORIGINAL_TYPE (TYPE_NAME (t1)))
    t1 = DECL_ORIGINAL_TYPE (TYPE_NAME (t1));

  while (TYPE_NAME (t2)
         && TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL
         && DECL_ORIGINAL_TYPE (TYPE_NAME (t2)))
    t2 = DECL_ORIGINAL_TYPE (TYPE_NAME (t2));

  /* C90 didn't have the requirement that the two tags be the same.  */
  if (flag_isoc99 && TYPE_NAME (t1) != TYPE_NAME (t2))
    return 0;

  /* C90 didn't say what happened if one or both of the types were
     incomplete; we choose to follow C99 rules here, which is that they
     are compatible.  */
  if (TYPE_SIZE (t1) == NULL
      || TYPE_SIZE (t2) == NULL)
    return 1;

  {
    const struct tagged_tu_seen_cache * tts_i;
    for (tts_i = tagged_tu_seen_base; tts_i != NULL; tts_i = tts_i->next)
      if (tts_i->t1 == t1 && tts_i->t2 == t2)
        return tts_i->val;
  }

  switch (TREE_CODE (t1))
    {
    case ENUMERAL_TYPE:
      {
        struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
        /* Speed up the case where the type values are in the same order.  */
        tree tv1 = TYPE_VALUES (t1);
        tree tv2 = TYPE_VALUES (t2);

        if (tv1 == tv2)
          {
            return 1;
          }

        for (;tv1 && tv2; tv1 = TREE_CHAIN (tv1), tv2 = TREE_CHAIN (tv2))
          {
            if (TREE_PURPOSE (tv1) != TREE_PURPOSE (tv2))
              break;
            if (simple_cst_equal (TREE_VALUE (tv1), TREE_VALUE (tv2)) != 1)
              {
                tu->val = 0;
                return 0;
              }
          }

        if (tv1 == NULL_TREE && tv2 == NULL_TREE)
          {
            return 1;
          }
        if (tv1 == NULL_TREE || tv2 == NULL_TREE)
          {
            tu->val = 0;
            return 0;
          }

        if (list_length (TYPE_VALUES (t1)) != list_length (TYPE_VALUES (t2)))
          {
            tu->val = 0;
            return 0;
          }

        for (s1 = TYPE_VALUES (t1); s1; s1 = TREE_CHAIN (s1))
          {
            s2 = purpose_member (TREE_PURPOSE (s1), TYPE_VALUES (t2));
            if (s2 == NULL
                || simple_cst_equal (TREE_VALUE (s1), TREE_VALUE (s2)) != 1)
              {
                tu->val = 0;
                return 0;
              }
          }
        return 1;
      }

    case UNION_TYPE:
      {
        struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
        if (list_length (TYPE_FIELDS (t1)) != list_length (TYPE_FIELDS (t2)))
          {
            tu->val = 0;
            return 0;
          }

        /* Speed up the common case where the fields are in the same order.  */
        for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2); s1 && s2;
             s1 = DECL_CHAIN (s1), s2 = DECL_CHAIN (s2))
          {
            int result;

            if (DECL_NAME (s1) != DECL_NAME (s2))
              break;
            result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
                                         enum_and_int_p, different_types_p);

            if (result != 1 && !DECL_NAME (s1))
              break;
            if (result == 0)
              {
                tu->val = 0;
                return 0;
              }
            if (result == 2)
              needs_warning = true;

            if (TREE_CODE (s1) == FIELD_DECL
                && simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
                                     DECL_FIELD_BIT_OFFSET (s2)) != 1)
              {
                tu->val = 0;
                return 0;
              }
          }
        if (!s1 && !s2)
          {
            tu->val = needs_warning ? 2 : 1;
            return tu->val;
          }

        for (s1 = TYPE_FIELDS (t1); s1; s1 = DECL_CHAIN (s1))
          {
            bool ok = false;

            for (s2 = TYPE_FIELDS (t2); s2; s2 = DECL_CHAIN (s2))
              if (DECL_NAME (s1) == DECL_NAME (s2))
                {
                  int result;

                  result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
                                               enum_and_int_p,
                                               different_types_p);

                  if (result != 1 && !DECL_NAME (s1))
                    continue;
                  if (result == 0)
                    {
                      tu->val = 0;
                      return 0;
                    }
                  if (result == 2)
                    needs_warning = true;

                  if (TREE_CODE (s1) == FIELD_DECL
                      && simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
                                           DECL_FIELD_BIT_OFFSET (s2)) != 1)
                    break;

                  ok = true;
                  break;
                }
            if (!ok)
              {
                tu->val = 0;
                return 0;
              }
          }
        tu->val = needs_warning ? 2 : 10;
        return tu->val;
      }

    case RECORD_TYPE:
      {
        struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);

        for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2);
             s1 && s2;
             s1 = DECL_CHAIN (s1), s2 = DECL_CHAIN (s2))
          {
            int result;
            if (TREE_CODE (s1) != TREE_CODE (s2)
                || DECL_NAME (s1) != DECL_NAME (s2))
              break;
            result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
                                         enum_and_int_p, different_types_p);
            if (result == 0)
              break;
            if (result == 2)
              needs_warning = true;

            if (TREE_CODE (s1) == FIELD_DECL
                && simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
                                     DECL_FIELD_BIT_OFFSET (s2)) != 1)
              break;
          }
        if (s1 && s2)
          tu->val = 0;
        else
          tu->val = needs_warning ? 2 : 1;
        return tu->val;
      }

    default:
      gcc_unreachable ();
    }
}

/* Return 1 if two function types F1 and F2 are compatible.
   If either type specifies no argument types,
   the other must specify a fixed number of self-promoting arg types.
   Otherwise, if one type specifies only the number of arguments,
   the other must specify that number of self-promoting arg types.
   Otherwise, the argument types must match.
   ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in comptypes_internal.  */

static int
function_types_compatible_p (const_tree f1, const_tree f2,
                             bool *enum_and_int_p, bool *different_types_p)
{
  tree args1, args2;
  /* 1 if no need for warning yet, 2 if warning cause has been seen.  */
  int val = 1;
  int val1;
  tree ret1, ret2;

  ret1 = TREE_TYPE (f1);
  ret2 = TREE_TYPE (f2);

  /* 'volatile' qualifiers on a function's return type used to mean
     the function is noreturn.  */
  if (TYPE_VOLATILE (ret1) != TYPE_VOLATILE (ret2))
    pedwarn (input_location, 0, "function return types not compatible due to %<volatile%>");
  if (TYPE_VOLATILE (ret1))
    ret1 = build_qualified_type (TYPE_MAIN_VARIANT (ret1),
                                 TYPE_QUALS (ret1) & ~TYPE_QUAL_VOLATILE);
  if (TYPE_VOLATILE (ret2))
    ret2 = build_qualified_type (TYPE_MAIN_VARIANT (ret2),
                                 TYPE_QUALS (ret2) & ~TYPE_QUAL_VOLATILE);
  val = comptypes_internal (ret1, ret2, enum_and_int_p, different_types_p);
  if (val == 0)
    return 0;

  args1 = TYPE_ARG_TYPES (f1);
  args2 = TYPE_ARG_TYPES (f2);

  if (different_types_p != NULL
      && (args1 == NULL_TREE) != (args2 == NULL_TREE))
    *different_types_p = true;

  /* An unspecified parmlist matches any specified parmlist
     whose argument types don't need default promotions.  */
  if (args1 == NULL_TREE)
    {
      if (!self_promoting_args_p (args2))
        return 0;
      /* If one of these types comes from a non-prototype fn definition,
         compare that with the other type's arglist.
         If they don't match, ask for a warning (but no error).  */
      if (TYPE_ACTUAL_ARG_TYPES (f1)
          && type_lists_compatible_p (args2, TYPE_ACTUAL_ARG_TYPES (f1),
                                      enum_and_int_p, different_types_p) != 1)
        val = 2;
      return val;
    }
  if (args2 == NULL_TREE)
    {
      if (!self_promoting_args_p (args1))
        return 0;
      if (TYPE_ACTUAL_ARG_TYPES (f2)
          && type_lists_compatible_p (args1, TYPE_ACTUAL_ARG_TYPES (f2),
                                      enum_and_int_p, different_types_p) != 1)
        val = 2;
      return val;
    }

  /* Both types have argument lists: compare them and propagate results.  */
  val1 = type_lists_compatible_p (args1, args2, enum_and_int_p,
                                  different_types_p);
  return val1 != 1 ? val1 : val;
}

/* Check two lists of types for compatibility, returning 0 for
   incompatible, 1 for compatible, or 2 for compatible with
   warning.  ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in
   comptypes_internal.  */

static int
type_lists_compatible_p (const_tree args1, const_tree args2,
                         bool *enum_and_int_p, bool *different_types_p)
{
  /* 1 if no need for warning yet, 2 if warning cause has been seen.  */
  int val = 1;
  int newval = 0;

  while (1)
    {
      tree a1, mv1, a2, mv2;
      if (args1 == NULL_TREE && args2 == NULL_TREE)
        return val;
      /* If one list is shorter than the other,
         they fail to match.  */
      if (args1 == NULL_TREE || args2 == NULL_TREE)
        return 0;
      mv1 = a1 = TREE_VALUE (args1);
      mv2 = a2 = TREE_VALUE (args2);
      if (mv1 && mv1 != error_mark_node && TREE_CODE (mv1) != ARRAY_TYPE)
        mv1 = (TYPE_ATOMIC (mv1)
               ? c_build_qualified_type (TYPE_MAIN_VARIANT (mv1),
                                         TYPE_QUAL_ATOMIC)
               : TYPE_MAIN_VARIANT (mv1));
      if (mv2 && mv2 != error_mark_node && TREE_CODE (mv2) != ARRAY_TYPE)
        mv2 = (TYPE_ATOMIC (mv2)
               ? c_build_qualified_type (TYPE_MAIN_VARIANT (mv2),
                                         TYPE_QUAL_ATOMIC)
               : TYPE_MAIN_VARIANT (mv2));
      /* A null pointer instead of a type
         means there is supposed to be an argument
         but nothing is specified about what type it has.
         So match anything that self-promotes.  */
      if (different_types_p != NULL
          && (a1 == NULL_TREE) != (a2 == NULL_TREE))
        *different_types_p = true;
      if (a1 == NULL_TREE)
        {
          if (c_type_promotes_to (a2) != a2)
            return 0;
        }
      else if (a2 == NULL_TREE)
        {
          if (c_type_promotes_to (a1) != a1)
            return 0;
        }
      /* If one of the lists has an error marker, ignore this arg.  */
      else if (TREE_CODE (a1) == ERROR_MARK
               || TREE_CODE (a2) == ERROR_MARK)
        ;
      else if (!(newval = comptypes_internal (mv1, mv2, enum_and_int_p,
                                              different_types_p)))
        {
          if (different_types_p != NULL)
            *different_types_p = true;
          /* Allow  wait (union {union wait *u; int *i} *)
             and  wait (union wait *)  to be compatible.  */
          if (TREE_CODE (a1) == UNION_TYPE
              && (TYPE_NAME (a1) == NULL_TREE
                  || TYPE_TRANSPARENT_AGGR (a1))
              && TREE_CODE (TYPE_SIZE (a1)) == INTEGER_CST
              && tree_int_cst_equal (TYPE_SIZE (a1),
                                     TYPE_SIZE (a2)))
            {
              tree memb;
              for (memb = TYPE_FIELDS (a1);
                   memb; memb = DECL_CHAIN (memb))
                {
                  tree mv3 = TREE_TYPE (memb);
                  if (mv3 && mv3 != error_mark_node
                      && TREE_CODE (mv3) != ARRAY_TYPE)
                    mv3 = (TYPE_ATOMIC (mv3)
                           ? c_build_qualified_type (TYPE_MAIN_VARIANT (mv3),
                                                     TYPE_QUAL_ATOMIC)
                           : TYPE_MAIN_VARIANT (mv3));
                  if (comptypes_internal (mv3, mv2, enum_and_int_p,
                                          different_types_p))
                    break;
                }
              if (memb == NULL_TREE)
                return 0;
            }
          else if (TREE_CODE (a2) == UNION_TYPE
                   && (TYPE_NAME (a2) == NULL_TREE
                       || TYPE_TRANSPARENT_AGGR (a2))
                   && TREE_CODE (TYPE_SIZE (a2)) == INTEGER_CST
                   && tree_int_cst_equal (TYPE_SIZE (a2),
                                          TYPE_SIZE (a1)))
            {
              tree memb;
              for (memb = TYPE_FIELDS (a2);
                   memb; memb = DECL_CHAIN (memb))
                {
                  tree mv3 = TREE_TYPE (memb);
                  if (mv3 && mv3 != error_mark_node
                      && TREE_CODE (mv3) != ARRAY_TYPE)
                    mv3 = (TYPE_ATOMIC (mv3)
                           ? c_build_qualified_type (TYPE_MAIN_VARIANT (mv3),
                                                     TYPE_QUAL_ATOMIC)
                           : TYPE_MAIN_VARIANT (mv3));
                  if (comptypes_internal (mv3, mv1, enum_and_int_p,
                                          different_types_p))
                    break;
                }
              if (memb == NULL_TREE)
                return 0;
            }
          else
            return 0;
        }

      /* comptypes said ok, but record if it said to warn.  */
      if (newval > val)
        val = newval;

      args1 = TREE_CHAIN (args1);
      args2 = TREE_CHAIN (args2);
    }
}

/* Compute the size to increment a pointer by.  When a function type or void
   type or incomplete type is passed, size_one_node is returned.
   This function does not emit any diagnostics; the caller is responsible
   for that.  */

static tree
c_size_in_bytes (const_tree type)
{
  enum tree_code code = TREE_CODE (type);

  if (code == FUNCTION_TYPE || code == VOID_TYPE || code == ERROR_MARK
      || !COMPLETE_TYPE_P (type))
    return size_one_node;

  /* Convert in case a char is more than one unit.  */
  return size_binop_loc (input_location, CEIL_DIV_EXPR, TYPE_SIZE_UNIT (type),
                         size_int (TYPE_PRECISION (char_type_node)
                                   / BITS_PER_UNIT));
}

/* Return either DECL or its known constant value (if it has one).  */

tree
decl_constant_value_1 (tree decl, bool in_init)
{
  if (/* Note that DECL_INITIAL isn't valid for a PARM_DECL.  */
      TREE_CODE (decl) != PARM_DECL
      && !TREE_THIS_VOLATILE (decl)
      && TREE_READONLY (decl)
      && DECL_INITIAL (decl) != NULL_TREE
      && !error_operand_p (DECL_INITIAL (decl))
      /* This is invalid if initial value is not constant.
         If it has either a function call, a memory reference,
         or a variable, then re-evaluating it could give different results.  */
      && TREE_CONSTANT (DECL_INITIAL (decl))
      /* Check for cases where this is sub-optimal, even though valid.  */
      && (in_init || TREE_CODE (DECL_INITIAL (decl)) != CONSTRUCTOR))
    return DECL_INITIAL (decl);
  return decl;
}

/* Return either DECL or its known constant value (if it has one).
   Like the above, but always return decl outside of functions.  */

tree
decl_constant_value (tree decl)
{
  /* Don't change a variable array bound or initial value to a constant
     in a place where a variable is invalid.  */
  return current_function_decl ? decl_constant_value_1 (decl, false) : decl;
}

/* Convert the array expression EXP to a pointer.  */
static tree
array_to_pointer_conversion (location_t loc, tree exp)
{
  tree orig_exp = exp;
  tree type = TREE_TYPE (exp);
  tree adr;
  tree restype = TREE_TYPE (type);
  tree ptrtype;

  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);

  STRIP_TYPE_NOPS (exp);

  if (TREE_NO_WARNING (orig_exp))
    TREE_NO_WARNING (exp) = 1;

  ptrtype = build_pointer_type (restype);

  if (INDIRECT_REF_P (exp))
    return convert (ptrtype, TREE_OPERAND (exp, 0));

  /* In C++ array compound literals are temporary objects unless they are
     const or appear in namespace scope, so they are destroyed too soon
     to use them for much of anything (c++/53220).  */
*/ if (warn_cxx_compat && TREE_CODE (exp) == COMPOUND_LITERAL_EXPR) { tree decl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); if (!TREE_READONLY (decl) && !TREE_STATIC (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "converting an array compound literal to a pointer " "is ill-formed in C++"); } adr = build_unary_op (loc, ADDR_EXPR, exp, true); return convert (ptrtype, adr); } /* Convert the function expression EXP to a pointer. */ static tree function_to_pointer_conversion (location_t loc, tree exp) { tree orig_exp = exp; gcc_assert (TREE_CODE (TREE_TYPE (exp)) == FUNCTION_TYPE); STRIP_TYPE_NOPS (exp); if (TREE_NO_WARNING (orig_exp)) TREE_NO_WARNING (exp) = 1; return build_unary_op (loc, ADDR_EXPR, exp, false); } /* Mark EXP as read, not just set, for set but not used -Wunused warning purposes. */ void mark_exp_read (tree exp) { switch (TREE_CODE (exp)) { case VAR_DECL: case PARM_DECL: DECL_READ_P (exp) = 1; break; case ARRAY_REF: case COMPONENT_REF: case MODIFY_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: CASE_CONVERT: case ADDR_EXPR: case VIEW_CONVERT_EXPR: mark_exp_read (TREE_OPERAND (exp, 0)); break; case COMPOUND_EXPR: case C_MAYBE_CONST_EXPR: mark_exp_read (TREE_OPERAND (exp, 1)); break; default: break; } } /* Perform the default conversion of arrays and functions to pointers. Return the result of converting EXP. For any other expression, just return EXP. LOC is the location of the expression. */ struct c_expr default_function_array_conversion (location_t loc, struct c_expr exp) { tree orig_exp = exp.value; tree type = TREE_TYPE (exp.value); enum tree_code code = TREE_CODE (type); switch (code) { case ARRAY_TYPE: { bool not_lvalue = false; bool lvalue_array_p; while ((TREE_CODE (exp.value) == NON_LVALUE_EXPR || CONVERT_EXPR_P (exp.value)) && TREE_TYPE (TREE_OPERAND (exp.value, 0)) == type) { if (TREE_CODE (exp.value) == NON_LVALUE_EXPR) not_lvalue = true; exp.value = TREE_OPERAND (exp.value, 0); } if (TREE_NO_WARNING (orig_exp)) TREE_NO_WARNING (exp.value) = 1; lvalue_array_p = !not_lvalue && lvalue_p (exp.value); if (!flag_isoc99 && !lvalue_array_p) { /* Before C99, non-lvalue arrays do not decay to pointers. Normally, using such an array would be invalid; but it can be used correctly inside sizeof or as a statement expression. Thus, do not give an error here; an error will result later. */ return exp; } exp.value = array_to_pointer_conversion (loc, exp.value); } break; case FUNCTION_TYPE: exp.value = function_to_pointer_conversion (loc, exp.value); break; default: break; } return exp; } struct c_expr default_function_array_read_conversion (location_t loc, struct c_expr exp) { mark_exp_read (exp.value); return default_function_array_conversion (loc, exp); } /* Return whether EXPR should be treated as an atomic lvalue for the purposes of load and store handling. */ static bool really_atomic_lvalue (tree expr) { if (error_operand_p (expr)) return false; if (!TYPE_ATOMIC (TREE_TYPE (expr))) return false; if (!lvalue_p (expr)) return false; /* Ignore _Atomic on register variables, since their addresses can't be taken so (a) atomicity is irrelevant and (b) the normal atomic sequences wouldn't work. Ignore _Atomic on structures containing bit-fields, since accessing elements of atomic structures or unions is undefined behavior (C11 6.5.2.3#5), but it's unclear if it's undefined at translation time or execution time, and the normal atomic sequences again wouldn't work. 
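   An illustrative (hypothetical) case for the register rule:

     register _Atomic int r;
     ... r += 1 ...

   r's address cannot be taken, so the generic __atomic_* sequences,
   which operate through pointers, are skipped for it.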
   */
  while (handled_component_p (expr))
    {
      if (TREE_CODE (expr) == COMPONENT_REF
          && DECL_C_BIT_FIELD (TREE_OPERAND (expr, 1)))
        return false;
      expr = TREE_OPERAND (expr, 0);
    }
  if (DECL_P (expr) && C_DECL_REGISTER (expr))
    return false;
  return true;
}

/* Convert expression EXP (location LOC) from lvalue to rvalue,
   including converting functions and arrays to pointers if CONVERT_P.
   If READ_P, also mark the expression as having been read.  */

struct c_expr
convert_lvalue_to_rvalue (location_t loc, struct c_expr exp,
                          bool convert_p, bool read_p)
{
  if (read_p)
    mark_exp_read (exp.value);
  if (convert_p)
    exp = default_function_array_conversion (loc, exp);
  if (really_atomic_lvalue (exp.value))
    {
      vec<tree, va_gc> *params;
      tree nonatomic_type, tmp, tmp_addr, fndecl, func_call;
      tree expr_type = TREE_TYPE (exp.value);
      tree expr_addr = build_unary_op (loc, ADDR_EXPR, exp.value, false);
      tree seq_cst = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);

      gcc_assert (TYPE_ATOMIC (expr_type));

      /* Expansion of a generic atomic load may require an additional
         element, so allocate enough to prevent a resize.  */
      vec_alloc (params, 4);

      /* Remove the qualifiers for the rest of the expressions and
         create the VAL temp variable to hold the RHS.  */
      nonatomic_type = build_qualified_type (expr_type, TYPE_UNQUALIFIED);
      tmp = create_tmp_var_raw (nonatomic_type);
      tmp_addr = build_unary_op (loc, ADDR_EXPR, tmp, false);
      TREE_ADDRESSABLE (tmp) = 1;
      TREE_NO_WARNING (tmp) = 1;

      /* Issue __atomic_load (&expr, &tmp, SEQ_CST);  */
      fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD);
      params->quick_push (expr_addr);
      params->quick_push (tmp_addr);
      params->quick_push (seq_cst);
      func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);

      /* EXPR is always read.  */
      mark_exp_read (exp.value);

      /* Return tmp which contains the value loaded.  */
      exp.value = build4 (TARGET_EXPR, nonatomic_type, tmp, func_call,
                          NULL_TREE, NULL_TREE);
    }
  return exp;
}

/* EXP is an expression of integer type.  Apply the integer promotions
   to it and return the promoted value.  */

tree
perform_integral_promotions (tree exp)
{
  tree type = TREE_TYPE (exp);
  enum tree_code code = TREE_CODE (type);

  gcc_assert (INTEGRAL_TYPE_P (type));

  /* Normally convert enums to int, but convert wide enums to something
     wider.  */
  if (code == ENUMERAL_TYPE)
    {
      type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
                                          TYPE_PRECISION (integer_type_node)),
                                     ((TYPE_PRECISION (type)
                                       >= TYPE_PRECISION (integer_type_node))
                                      && TYPE_UNSIGNED (type)));
      return convert (type, exp);
    }

  /* ??? This should no longer be needed now bit-fields have their
     proper types.  */
  if (TREE_CODE (exp) == COMPONENT_REF
      && DECL_C_BIT_FIELD (TREE_OPERAND (exp, 1))
      /* If it's thinner than an int, promote it like a
         c_promoting_integer_type_p, otherwise leave it alone.  */
      && compare_tree_int (DECL_SIZE (TREE_OPERAND (exp, 1)),
                           TYPE_PRECISION (integer_type_node)) < 0)
    return convert (integer_type_node, exp);

  if (c_promoting_integer_type_p (type))
    {
      /* Preserve unsignedness if not really getting any wider.  */
      if (TYPE_UNSIGNED (type)
          && TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
        return convert (unsigned_type_node, exp);

      return convert (integer_type_node, exp);
    }

  return exp;
}

/* Perform default promotions for C data used in expressions.
   Enumeral types or short or char are converted to int.
   In addition, manifest constant symbols are replaced by their values.
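   For example (an illustrative sketch only):

     short s = 1;
     char c = 'x';
     ... s + c ...

   both operands are promoted to int before the addition, and an
   enumeration operand would likewise be converted to int (or to a
   wider type if its values do not fit).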
*/ tree default_conversion (tree exp) { tree orig_exp; tree type = TREE_TYPE (exp); enum tree_code code = TREE_CODE (type); tree promoted_type; mark_exp_read (exp); /* Functions and arrays have been converted during parsing. */ gcc_assert (code != FUNCTION_TYPE); if (code == ARRAY_TYPE) return exp; /* Constants can be used directly unless they're not loadable. */ if (TREE_CODE (exp) == CONST_DECL) exp = DECL_INITIAL (exp); /* Strip no-op conversions. */ orig_exp = exp; STRIP_TYPE_NOPS (exp); if (TREE_NO_WARNING (orig_exp)) TREE_NO_WARNING (exp) = 1; if (code == VOID_TYPE) { error_at (EXPR_LOC_OR_LOC (exp, input_location), "void value not ignored as it ought to be"); return error_mark_node; } exp = require_complete_type (EXPR_LOC_OR_LOC (exp, input_location), exp); if (exp == error_mark_node) return error_mark_node; promoted_type = targetm.promoted_type (type); if (promoted_type) return convert (promoted_type, exp); if (INTEGRAL_TYPE_P (type)) return perform_integral_promotions (exp); return exp; } /* Look up COMPONENT in a structure or union TYPE. If the component name is not found, returns NULL_TREE. Otherwise, the return value is a TREE_LIST, with each TREE_VALUE a FIELD_DECL stepping down the chain to the component, which is in the last TREE_VALUE of the list. Normally the list is of length one, but if the component is embedded within (nested) anonymous structures or unions, the list steps down the chain to the component. */ static tree lookup_field (tree type, tree component) { tree field; /* If TYPE_LANG_SPECIFIC is set, then it is a sorted array of pointers to the field elements. Use a binary search on this array to quickly find the element. Otherwise, do a linear search. TYPE_LANG_SPECIFIC will always be set for structures which have many elements. */ if (TYPE_LANG_SPECIFIC (type) && TYPE_LANG_SPECIFIC (type)->s) { int bot, top, half; tree *field_array = &TYPE_LANG_SPECIFIC (type)->s->elts[0]; field = TYPE_FIELDS (type); bot = 0; top = TYPE_LANG_SPECIFIC (type)->s->len; while (top - bot > 1) { half = (top - bot + 1) >> 1; field = field_array[bot+half]; if (DECL_NAME (field) == NULL_TREE) { /* Step through all anon unions in linear fashion. */ while (DECL_NAME (field_array[bot]) == NULL_TREE) { field = field_array[bot++]; if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (field))) { tree anon = lookup_field (TREE_TYPE (field), component); if (anon) return tree_cons (NULL_TREE, field, anon); /* The Plan 9 compiler permits referring directly to an anonymous struct/union field using a typedef name. */ if (flag_plan9_extensions && TYPE_NAME (TREE_TYPE (field)) != NULL_TREE && (TREE_CODE (TYPE_NAME (TREE_TYPE (field))) == TYPE_DECL) && (DECL_NAME (TYPE_NAME (TREE_TYPE (field))) == component)) break; } } /* Entire record is only anon unions. */ if (bot > top) return NULL_TREE; /* Restart the binary search, with new lower bound. */ continue; } if (DECL_NAME (field) == component) break; if (DECL_NAME (field) < component) bot += half; else top = bot + half; } if (DECL_NAME (field_array[bot]) == component) field = field_array[bot]; else if (DECL_NAME (field) != component) return NULL_TREE; } else { for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) { if (DECL_NAME (field) == NULL_TREE && RECORD_OR_UNION_TYPE_P (TREE_TYPE (field))) { tree anon = lookup_field (TREE_TYPE (field), component); if (anon) return tree_cons (NULL_TREE, field, anon); /* The Plan 9 compiler permits referring directly to an anonymous struct/union field using a typedef name. 
*/ if (flag_plan9_extensions && TYPE_NAME (TREE_TYPE (field)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (field))) == TYPE_DECL && (DECL_NAME (TYPE_NAME (TREE_TYPE (field))) == component)) break; } if (DECL_NAME (field) == component) break; } if (field == NULL_TREE) return NULL_TREE; } return tree_cons (NULL_TREE, field, NULL_TREE); } /* Recursively append candidate IDENTIFIER_NODEs to CANDIDATES. */ static void lookup_field_fuzzy_find_candidates (tree type, tree component, vec<tree> *candidates) { tree field; for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) { if (DECL_NAME (field) == NULL_TREE && RECORD_OR_UNION_TYPE_P (TREE_TYPE (field))) lookup_field_fuzzy_find_candidates (TREE_TYPE (field), component, candidates); if (DECL_NAME (field)) candidates->safe_push (DECL_NAME (field)); } } /* Like "lookup_field", but find the closest matching IDENTIFIER_NODE, rather than returning a TREE_LIST for an exact match. */ static tree lookup_field_fuzzy (tree type, tree component) { gcc_assert (TREE_CODE (component) == IDENTIFIER_NODE); /* First, gather a list of candidates. */ auto_vec <tree> candidates; lookup_field_fuzzy_find_candidates (type, component, &candidates); return find_closest_identifier (component, &candidates); } /* Support function for build_component_ref's error-handling. Given DATUM_TYPE, and "DATUM.COMPONENT", where DATUM is *not* a struct or union, should we suggest "DATUM->COMPONENT" as a hint? */ static bool should_suggest_deref_p (tree datum_type) { /* We don't do it for Objective-C, since Objective-C 2.0 dot-syntax allows "." for ptrs; we could be handling a failed attempt to access a property. */ if (c_dialect_objc ()) return false; /* Only suggest it for pointers... */ if (TREE_CODE (datum_type) != POINTER_TYPE) return false; /* ...to structs/unions. */ tree underlying_type = TREE_TYPE (datum_type); enum tree_code code = TREE_CODE (underlying_type); if (code == RECORD_TYPE || code == UNION_TYPE) return true; else return false; } /* Make an expression to refer to the COMPONENT field of structure or union value DATUM. COMPONENT is an IDENTIFIER_NODE. LOC is the location of the COMPONENT_REF. COMPONENT_LOC is the location of COMPONENT. */ tree build_component_ref (location_t loc, tree datum, tree component, location_t component_loc) { tree type = TREE_TYPE (datum); enum tree_code code = TREE_CODE (type); tree field = NULL; tree ref; bool datum_lvalue = lvalue_p (datum); if (!objc_is_public (datum, component)) return error_mark_node; /* Detect Objective-C property syntax object.property. */ if (c_dialect_objc () && (ref = objc_maybe_build_component_ref (datum, component))) return ref; /* See if there is a field or component with name COMPONENT. */ if (code == RECORD_TYPE || code == UNION_TYPE) { if (!COMPLETE_TYPE_P (type)) { c_incomplete_type_error (loc, NULL_TREE, type); return error_mark_node; } field = lookup_field (type, component); if (!field) { tree guessed_id = lookup_field_fuzzy (type, component); if (guessed_id) { /* Attempt to provide a fixit replacement hint, if we have a valid range for the component. */ location_t reported_loc = (component_loc != UNKNOWN_LOCATION) ? 
component_loc : loc; gcc_rich_location rich_loc (reported_loc); if (component_loc != UNKNOWN_LOCATION) rich_loc.add_fixit_misspelled_id (component_loc, guessed_id); error_at (&rich_loc, "%qT has no member named %qE; did you mean %qE?", type, component, guessed_id); } else error_at (loc, "%qT has no member named %qE", type, component); return error_mark_node; } /* Accessing elements of atomic structures or unions is undefined behavior (C11 6.5.2.3#5). */ if (TYPE_ATOMIC (type) && c_inhibit_evaluation_warnings == 0) { if (code == RECORD_TYPE) warning_at (loc, 0, "accessing a member %qE of an atomic " "structure %qE", component, datum); else warning_at (loc, 0, "accessing a member %qE of an atomic " "union %qE", component, datum); } /* Chain the COMPONENT_REFs if necessary down to the FIELD. This might be better solved in future the way the C++ front end does it - by giving the anonymous entities each a separate name and type, and then have build_component_ref recursively call itself. We can't do that here. */ do { tree subdatum = TREE_VALUE (field); int quals; tree subtype; bool use_datum_quals; if (TREE_TYPE (subdatum) == error_mark_node) return error_mark_node; /* If this is an rvalue, it does not have qualifiers in C standard terms and we must avoid propagating such qualifiers down to a non-lvalue array that is then converted to a pointer. */ use_datum_quals = (datum_lvalue || TREE_CODE (TREE_TYPE (subdatum)) != ARRAY_TYPE); quals = TYPE_QUALS (strip_array_types (TREE_TYPE (subdatum))); if (use_datum_quals) quals |= TYPE_QUALS (TREE_TYPE (datum)); subtype = c_build_qualified_type (TREE_TYPE (subdatum), quals); ref = build3 (COMPONENT_REF, subtype, datum, subdatum, NULL_TREE); SET_EXPR_LOCATION (ref, loc); if (TREE_READONLY (subdatum) || (use_datum_quals && TREE_READONLY (datum))) TREE_READONLY (ref) = 1; if (TREE_THIS_VOLATILE (subdatum) || (use_datum_quals && TREE_THIS_VOLATILE (datum))) TREE_THIS_VOLATILE (ref) = 1; if (TREE_DEPRECATED (subdatum)) warn_deprecated_use (subdatum, NULL_TREE); datum = ref; field = TREE_CHAIN (field); } while (field); return ref; } else if (should_suggest_deref_p (type)) { /* Special-case the error message for "ptr.field" for the case where the user has confused "." vs "->". */ rich_location richloc (line_table, loc); /* "loc" should be the "." token. */ richloc.add_fixit_replace ("->"); error_at (&richloc, "%qE is a pointer; did you mean to use %<->%>?", datum); return error_mark_node; } else if (code != ERROR_MARK) error_at (loc, "request for member %qE in something not a structure or union", component); return error_mark_node; } /* Given an expression PTR for a pointer, return an expression for the value pointed to. ERRORSTRING is the name of the operator to appear in error messages. LOC is the location to use for the generated tree. */ tree build_indirect_ref (location_t loc, tree ptr, ref_operator errstring) { tree pointer = default_conversion (ptr); tree type = TREE_TYPE (pointer); tree ref; if (TREE_CODE (type) == POINTER_TYPE) { if (CONVERT_EXPR_P (pointer) || TREE_CODE (pointer) == VIEW_CONVERT_EXPR) { /* If a warning is issued, mark it to avoid duplicates from the backend. This only needs to be done at warn_strict_aliasing > 2. 
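   An illustrative (hypothetical) expression that reaches this check:

     float f = 1.0f;
     ... *(int *) &f ...

   the dereference through a type-punned pointer can be diagnosed at
   -Wstrict-aliasing=3.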
*/ if (warn_strict_aliasing > 2) if (strict_aliasing_warning (EXPR_LOCATION (pointer), type, TREE_OPERAND (pointer, 0))) TREE_NO_WARNING (pointer) = 1; } if (TREE_CODE (pointer) == ADDR_EXPR && (TREE_TYPE (TREE_OPERAND (pointer, 0)) == TREE_TYPE (type))) { ref = TREE_OPERAND (pointer, 0); protected_set_expr_location (ref, loc); return ref; } else { tree t = TREE_TYPE (type); ref = build1 (INDIRECT_REF, t, pointer); if (!COMPLETE_OR_VOID_TYPE_P (t) && TREE_CODE (t) != ARRAY_TYPE) { if (!C_TYPE_ERROR_REPORTED (TREE_TYPE (ptr))) { error_at (loc, "dereferencing pointer to incomplete type " "%qT", t); C_TYPE_ERROR_REPORTED (TREE_TYPE (ptr)) = 1; } return error_mark_node; } if (VOID_TYPE_P (t) && c_inhibit_evaluation_warnings == 0) warning_at (loc, 0, "dereferencing %<void *%> pointer"); /* We *must* set TREE_READONLY when dereferencing a pointer to const, so that we get the proper error message if the result is used to assign to. Also, &* is supposed to be a no-op. And ANSI C seems to specify that the type of the result should be the const type. */ /* A de-reference of a pointer to const is not a const. It is valid to change it via some other pointer. */ TREE_READONLY (ref) = TYPE_READONLY (t); TREE_SIDE_EFFECTS (ref) = TYPE_VOLATILE (t) || TREE_SIDE_EFFECTS (pointer); TREE_THIS_VOLATILE (ref) = TYPE_VOLATILE (t); protected_set_expr_location (ref, loc); return ref; } } else if (TREE_CODE (pointer) != ERROR_MARK) invalid_indirection_error (loc, type, errstring); return error_mark_node; } /* This handles expressions of the form "a[i]", which denotes an array reference. This is logically equivalent in C to *(a+i), but we may do it differently. If A is a variable or a member, we generate a primitive ARRAY_REF. This avoids forcing the array out of registers, and can work on arrays that are not lvalues (for example, members of structures returned by functions). For vector types, allow vector[i] but not i[vector], and create *(((type*)&vectortype) + i) for the expression. LOC is the location to use for the returned expression. */ tree build_array_ref (location_t loc, tree array, tree index) { tree ret; bool swapped = false; if (TREE_TYPE (array) == error_mark_node || TREE_TYPE (index) == error_mark_node) return error_mark_node; if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE && TREE_CODE (TREE_TYPE (array)) != POINTER_TYPE /* Allow vector[index] but not index[vector]. */ && !VECTOR_TYPE_P (TREE_TYPE (array))) { if (TREE_CODE (TREE_TYPE (index)) != ARRAY_TYPE && TREE_CODE (TREE_TYPE (index)) != POINTER_TYPE) { error_at (loc, "subscripted value is neither array nor pointer nor vector"); return error_mark_node; } std::swap (array, index); swapped = true; } if (!INTEGRAL_TYPE_P (TREE_TYPE (index))) { error_at (loc, "array subscript is not an integer"); return error_mark_node; } if (TREE_CODE (TREE_TYPE (TREE_TYPE (array))) == FUNCTION_TYPE) { error_at (loc, "subscripted value is pointer to function"); return error_mark_node; } /* ??? Existing practice has been to warn only when the char index is syntactically the index, not for char[array]. */ if (!swapped) warn_array_subscript_with_type_char (loc, index); /* Apply default promotions *after* noticing character types. 
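   (A sketch of the case being caught: given char buf[10], writing
   buf['a'] warns under -Wchar-subscripts before the index is promoted,
   while the swapped spelling 'a'[buf] is deliberately left alone, as
   noted above.)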
*/ index = default_conversion (index); if (index == error_mark_node) return error_mark_node; gcc_assert (TREE_CODE (TREE_TYPE (index)) == INTEGER_TYPE); bool was_vector = VECTOR_TYPE_P (TREE_TYPE (array)); bool non_lvalue = convert_vector_to_array_for_subscript (loc, &array, index); if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE) { tree rval, type; /* An array that is indexed by a non-constant cannot be stored in a register; we must be able to do address arithmetic on its address. Likewise an array of elements of variable size. */ if (TREE_CODE (index) != INTEGER_CST || (COMPLETE_TYPE_P (TREE_TYPE (TREE_TYPE (array))) && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array)))) != INTEGER_CST)) { if (!c_mark_addressable (array, true)) return error_mark_node; } /* An array that is indexed by a constant value which is not within the array bounds cannot be stored in a register either; because we would get a crash in store_bit_field/extract_bit_field when trying to access a non-existent part of the register. */ if (TREE_CODE (index) == INTEGER_CST && TYPE_DOMAIN (TREE_TYPE (array)) && !int_fits_type_p (index, TYPE_DOMAIN (TREE_TYPE (array)))) { if (!c_mark_addressable (array)) return error_mark_node; } if ((pedantic || warn_c90_c99_compat) && ! was_vector) { tree foo = array; while (TREE_CODE (foo) == COMPONENT_REF) foo = TREE_OPERAND (foo, 0); if (VAR_P (foo) && C_DECL_REGISTER (foo)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids subscripting %<register%> array"); else if (!lvalue_p (foo)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 forbids subscripting non-lvalue " "array"); } type = TREE_TYPE (TREE_TYPE (array)); rval = build4 (ARRAY_REF, type, array, index, NULL_TREE, NULL_TREE); /* Array ref is const/volatile if the array elements are or if the array is. */ TREE_READONLY (rval) |= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array))) | TREE_READONLY (array)); TREE_SIDE_EFFECTS (rval) |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array))) | TREE_SIDE_EFFECTS (array)); TREE_THIS_VOLATILE (rval) |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array))) /* This was added by rms on 16 Nov 91. It fixes vol struct foo *a; a->elts[1] in an inline function. Hope it doesn't break something else. */ | TREE_THIS_VOLATILE (array)); ret = require_complete_type (loc, rval); protected_set_expr_location (ret, loc); if (non_lvalue) ret = non_lvalue_loc (loc, ret); return ret; } else { tree ar = default_conversion (array); if (ar == error_mark_node) return ar; gcc_assert (TREE_CODE (TREE_TYPE (ar)) == POINTER_TYPE); gcc_assert (TREE_CODE (TREE_TYPE (TREE_TYPE (ar))) != FUNCTION_TYPE); ret = build_indirect_ref (loc, build_binary_op (loc, PLUS_EXPR, ar, index, false), RO_ARRAY_INDEXING); if (non_lvalue) ret = non_lvalue_loc (loc, ret); return ret; } } /* Build an external reference to identifier ID. FUN indicates whether this will be used for a function call. LOC is the source location of the identifier. This sets *TYPE to the type of the identifier, which is not the same as the type of the returned value for CONST_DECLs defined as enum constants. If the type of the identifier is not available, *TYPE is set to NULL. */ tree build_external_ref (location_t loc, tree id, bool fun, tree *type) { tree ref; tree decl = lookup_name (id); /* In Objective-C, an instance variable (ivar) may be preferred to whatever lookup_name() found. */ decl = objc_lookup_ivar (decl, id); *type = NULL; if (decl && decl != error_mark_node) { ref = decl; *type = TREE_TYPE (ref); } else if (fun) /* Implicit function declaration. 
*/ ref = implicitly_declare (loc, id); else if (decl == error_mark_node) /* Don't complain about something that's already been complained about. */ return error_mark_node; else { undeclared_variable (loc, id); return error_mark_node; } if (TREE_TYPE (ref) == error_mark_node) return error_mark_node; if (TREE_DEPRECATED (ref)) warn_deprecated_use (ref, NULL_TREE); /* Recursive call does not count as usage. */ if (ref != current_function_decl) { TREE_USED (ref) = 1; } if (TREE_CODE (ref) == FUNCTION_DECL && !in_alignof) { if (!in_sizeof && !in_typeof) C_DECL_USED (ref) = 1; else if (DECL_INITIAL (ref) == NULL_TREE && DECL_EXTERNAL (ref) && !TREE_PUBLIC (ref)) record_maybe_used_decl (ref); } if (TREE_CODE (ref) == CONST_DECL) { used_types_insert (TREE_TYPE (ref)); if (warn_cxx_compat && TREE_CODE (TREE_TYPE (ref)) == ENUMERAL_TYPE && C_TYPE_DEFINED_IN_STRUCT (TREE_TYPE (ref))) { warning_at (loc, OPT_Wc___compat, ("enum constant defined in struct or union " "is not visible in C++")); inform (DECL_SOURCE_LOCATION (ref), "enum constant defined here"); } ref = DECL_INITIAL (ref); TREE_CONSTANT (ref) = 1; } else if (current_function_decl != NULL_TREE && !DECL_FILE_SCOPE_P (current_function_decl) && (VAR_OR_FUNCTION_DECL_P (ref) || TREE_CODE (ref) == PARM_DECL)) { tree context = decl_function_context (ref); if (context != NULL_TREE && context != current_function_decl) DECL_NONLOCAL (ref) = 1; } /* C99 6.7.4p3: An inline definition of a function with external linkage ... shall not contain a reference to an identifier with internal linkage. */ else if (current_function_decl != NULL_TREE && DECL_DECLARED_INLINE_P (current_function_decl) && DECL_EXTERNAL (current_function_decl) && VAR_OR_FUNCTION_DECL_P (ref) && (!VAR_P (ref) || TREE_STATIC (ref)) && ! TREE_PUBLIC (ref) && DECL_CONTEXT (ref) != current_function_decl) record_inline_static (loc, current_function_decl, ref, csi_internal); return ref; } /* Record details of decls possibly used inside sizeof or typeof. */ struct maybe_used_decl { /* The decl. */ tree decl; /* The level seen at (in_sizeof + in_typeof). */ int level; /* The next one at this level or above, or NULL. */ struct maybe_used_decl *next; }; static struct maybe_used_decl *maybe_used_decls; /* Record that DECL, an undefined static function reference seen inside sizeof or typeof, might be used if the operand of sizeof is a VLA type or the operand of typeof is a variably modified type. */ static void record_maybe_used_decl (tree decl) { struct maybe_used_decl *t = XOBNEW (&parser_obstack, struct maybe_used_decl); t->decl = decl; t->level = in_sizeof + in_typeof; t->next = maybe_used_decls; maybe_used_decls = t; } /* Pop the stack of decls possibly used inside sizeof or typeof. If USED is false, just discard them. If it is true, mark them used (if no longer inside sizeof or typeof) or move them to the next level up (if still inside sizeof or typeof). */ void pop_maybe_used (bool used) { struct maybe_used_decl *p = maybe_used_decls; int cur_level = in_sizeof + in_typeof; while (p && p->level > cur_level) { if (used) { if (cur_level == 0) C_DECL_USED (p->decl) = 1; else p->level = cur_level; } p = p->next; } if (!used || cur_level == 0) maybe_used_decls = p; } /* Return the result of sizeof applied to EXPR. 
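   An illustrative sketch of the VLA case handled below:

     int n = 5;
     int vla[n];
     ... sizeof vla ...

   is evaluated at run time (C99 6.5.3.4p2), whereas sizeof on a
   non-VLA operand folds to an integer constant.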
*/ struct c_expr c_expr_sizeof_expr (location_t loc, struct c_expr expr) { struct c_expr ret; if (expr.value == error_mark_node) { ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; pop_maybe_used (false); } else { bool expr_const_operands = true; if (TREE_CODE (expr.value) == PARM_DECL && C_ARRAY_PARAMETER (expr.value)) { if (warning_at (loc, OPT_Wsizeof_array_argument, "%<sizeof%> on array function parameter %qE will " "return size of %qT", expr.value, TREE_TYPE (expr.value))) inform (DECL_SOURCE_LOCATION (expr.value), "declared here"); } tree folded_expr = c_fully_fold (expr.value, require_constant_value, &expr_const_operands); ret.value = c_sizeof (loc, TREE_TYPE (folded_expr)); c_last_sizeof_arg = expr.value; c_last_sizeof_loc = loc; ret.original_code = SIZEOF_EXPR; ret.original_type = NULL; if (c_vla_type_p (TREE_TYPE (folded_expr))) { /* sizeof is evaluated when given a vla (C99 6.5.3.4p2). */ ret.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret.value), folded_expr, ret.value); C_MAYBE_CONST_EXPR_NON_CONST (ret.value) = !expr_const_operands; SET_EXPR_LOCATION (ret.value, loc); } pop_maybe_used (C_TYPE_VARIABLE_SIZE (TREE_TYPE (folded_expr))); } return ret; } /* Return the result of sizeof applied to T, a structure for the type name passed to sizeof (rather than the type itself). LOC is the location of the original expression. */ struct c_expr c_expr_sizeof_type (location_t loc, struct c_type_name *t) { tree type; struct c_expr ret; tree type_expr = NULL_TREE; bool type_expr_const = true; type = groktypename (t, &type_expr, &type_expr_const); ret.value = c_sizeof (loc, type); c_last_sizeof_arg = type; c_last_sizeof_loc = loc; ret.original_code = SIZEOF_EXPR; ret.original_type = NULL; if ((type_expr || TREE_CODE (ret.value) == INTEGER_CST) && c_vla_type_p (type)) { /* If the type is a [*] array, it is a VLA but is represented as having a size of zero. In such a case we must ensure that the result of sizeof does not get folded to a constant by c_fully_fold, because if the size is evaluated the result is not constant and so constraints on zero or negative size arrays must not be applied when this sizeof call is inside another array declarator. */ if (!type_expr) type_expr = integer_zero_node; ret.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret.value), type_expr, ret.value); C_MAYBE_CONST_EXPR_NON_CONST (ret.value) = !type_expr_const; } pop_maybe_used (type != error_mark_node ? C_TYPE_VARIABLE_SIZE (type) : false); return ret; } /* Build a function call to function FUNCTION with parameters PARAMS. The function call is at LOC. PARAMS is a list--a chain of TREE_LIST nodes--in which the TREE_VALUE of each node is a parameter-expression. FUNCTION's data type may be a function type or a pointer-to-function. */ tree build_function_call (location_t loc, tree function, tree params) { vec<tree, va_gc> *v; tree ret; vec_alloc (v, list_length (params)); for (; params; params = TREE_CHAIN (params)) v->quick_push (TREE_VALUE (params)); ret = c_build_function_call_vec (loc, vNULL, function, v, NULL); vec_free (v); return ret; } /* Give a note about the location of the declaration of DECL. */ static void inform_declaration (tree decl) { if (decl && (TREE_CODE (decl) != FUNCTION_DECL || !DECL_IS_BUILTIN (decl))) inform (DECL_SOURCE_LOCATION (decl), "declared here"); } /* Build a function call to function FUNCTION with parameters PARAMS. 
ORIGTYPES, if not NULL, is a vector of types; each element is either NULL or the original type of the corresponding element in PARAMS. The original type may differ from TREE_TYPE of the parameter for enums. FUNCTION's data type may be a function type or pointer-to-function. This function changes the elements of PARAMS. */ tree build_function_call_vec (location_t loc, vec<location_t> arg_loc, tree function, vec<tree, va_gc> *params, vec<tree, va_gc> *origtypes) { tree fntype, fundecl = NULL_TREE; tree name = NULL_TREE, result; tree tem; int nargs; tree *argarray; /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (function); /* Convert anything with function type to a pointer-to-function. */ if (TREE_CODE (function) == FUNCTION_DECL) { name = DECL_NAME (function); if (flag_tm) tm_malloc_replacement (function); fundecl = function; /* Atomic functions have type checking/casting already done. They are often rewritten and don't match the original parameter list. */ if (name && !strncmp (IDENTIFIER_POINTER (name), "__atomic_", 9)) origtypes = NULL; } if (TREE_CODE (TREE_TYPE (function)) == FUNCTION_TYPE) function = function_to_pointer_conversion (loc, function); /* For Objective-C, convert any calls via a cast to OBJC_TYPE_REF expressions, like those used for ObjC messenger dispatches. */ if (params && !params->is_empty ()) function = objc_rewrite_function_call (function, (*params)[0]); function = c_fully_fold (function, false, NULL); fntype = TREE_TYPE (function); if (TREE_CODE (fntype) == ERROR_MARK) return error_mark_node; if (!(TREE_CODE (fntype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (fntype)) == FUNCTION_TYPE)) { if (!flag_diagnostics_show_caret) error_at (loc, "called object %qE is not a function or function pointer", function); else if (DECL_P (function)) { error_at (loc, "called object %qD is not a function or function pointer", function); inform_declaration (function); } else error_at (loc, "called object is not a function or function pointer"); return error_mark_node; } if (fundecl && TREE_THIS_VOLATILE (fundecl)) current_function_returns_abnormally = 1; /* fntype now gets the type of function pointed to. */ fntype = TREE_TYPE (fntype); /* Convert the parameters to the types declared in the function prototype, or apply default promotions. */ nargs = convert_arguments (loc, arg_loc, TYPE_ARG_TYPES (fntype), params, origtypes, function, fundecl); if (nargs < 0) return error_mark_node; /* Check that the function is called through a compatible prototype. If it is not, warn. */ if (CONVERT_EXPR_P (function) && TREE_CODE (tem = TREE_OPERAND (function, 0)) == ADDR_EXPR && TREE_CODE (tem = TREE_OPERAND (tem, 0)) == FUNCTION_DECL && !comptypes (fntype, TREE_TYPE (tem))) { tree return_type = TREE_TYPE (fntype); /* This situation leads to run-time undefined behavior. We can't, therefore, simply error unless we can prove that all possible executions of the program must execute the code. */ warning_at (loc, 0, "function called through a non-compatible type"); if (VOID_TYPE_P (return_type) && TYPE_QUALS (return_type) != TYPE_UNQUALIFIED) pedwarn (loc, 0, "function with qualified void return type called"); } argarray = vec_safe_address (params); /* Check that arguments to builtin functions match the expectations. */ if (fundecl && DECL_BUILT_IN (fundecl) && DECL_BUILT_IN_CLASS (fundecl) == BUILT_IN_NORMAL && !check_builtin_function_arguments (loc, arg_loc, fundecl, nargs, argarray)) return error_mark_node; /* Check that the arguments to the function are valid. 
*/ bool warned_p = check_function_arguments (loc, fundecl, fntype, nargs, argarray, &arg_loc); if (name != NULL_TREE && !strncmp (IDENTIFIER_POINTER (name), "__builtin_", 10)) { if (require_constant_value) result = fold_build_call_array_initializer_loc (loc, TREE_TYPE (fntype), function, nargs, argarray); else result = fold_build_call_array_loc (loc, TREE_TYPE (fntype), function, nargs, argarray); if (TREE_CODE (result) == NOP_EXPR && TREE_CODE (TREE_OPERAND (result, 0)) == INTEGER_CST) STRIP_TYPE_NOPS (result); } else result = build_call_array_loc (loc, TREE_TYPE (fntype), function, nargs, argarray); /* If -Wnonnull warning has been diagnosed, avoid diagnosing it again later. */ if (warned_p && TREE_CODE (result) == CALL_EXPR) TREE_NO_WARNING (result) = 1; /* In this improbable scenario, a nested function returns a VM type. Create a TARGET_EXPR so that the call always has a LHS, much as what the C++ FE does for functions returning non-PODs. */ if (variably_modified_type_p (TREE_TYPE (fntype), NULL_TREE)) { tree tmp = create_tmp_var_raw (TREE_TYPE (fntype)); result = build4 (TARGET_EXPR, TREE_TYPE (fntype), tmp, result, NULL_TREE, NULL_TREE); } if (VOID_TYPE_P (TREE_TYPE (result))) { if (TYPE_QUALS (TREE_TYPE (result)) != TYPE_UNQUALIFIED) pedwarn (loc, 0, "function with qualified void return type called"); return result; } return require_complete_type (loc, result); } /* Like build_function_call_vec, but call also resolve_overloaded_builtin. */ tree c_build_function_call_vec (location_t loc, vec<location_t> arg_loc, tree function, vec<tree, va_gc> *params, vec<tree, va_gc> *origtypes) { /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (function); /* Convert anything with function type to a pointer-to-function. */ if (TREE_CODE (function) == FUNCTION_DECL) { /* Implement type-directed function overloading for builtins. resolve_overloaded_builtin and targetm.resolve_overloaded_builtin handle all the type checking. The result is a complete expression that implements this function call. */ tree tem = resolve_overloaded_builtin (loc, function, params); if (tem) return tem; } return build_function_call_vec (loc, arg_loc, function, params, origtypes); } /* Convert the argument expressions in the vector VALUES to the types in the list TYPELIST. If TYPELIST is exhausted, or when an element has NULL as its type, perform the default conversions. ORIGTYPES is the original types of the expressions in VALUES. This holds the type of enum values which have been converted to integral types. It may be NULL. FUNCTION is a tree for the called function. It is used only for error messages, where it is formatted with %qE. This is also where warnings about wrong number of args are generated. ARG_LOC are locations of function arguments (if any). Returns the actual number of arguments processed (which may be less than the length of VALUES in some error situations), or -1 on failure. */ static int convert_arguments (location_t loc, vec<location_t> arg_loc, tree typelist, vec<tree, va_gc> *values, vec<tree, va_gc> *origtypes, tree function, tree fundecl) { tree typetail, val; unsigned int parmnum; bool error_args = false; const bool type_generic = fundecl && lookup_attribute ("type generic", TYPE_ATTRIBUTES (TREE_TYPE (fundecl))); bool type_generic_remove_excess_precision = false; bool type_generic_overflow_p = false; tree selector; /* Change pointer to function to the function itself for diagnostics. 
*/ if (TREE_CODE (function) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL) function = TREE_OPERAND (function, 0); /* Handle an ObjC selector specially for diagnostics. */ selector = objc_message_selector (); /* For type-generic built-in functions, determine whether excess precision should be removed (classification) or not (comparison). */ if (type_generic && DECL_BUILT_IN (fundecl) && DECL_BUILT_IN_CLASS (fundecl) == BUILT_IN_NORMAL) { switch (DECL_FUNCTION_CODE (fundecl)) { case BUILT_IN_ISFINITE: case BUILT_IN_ISINF: case BUILT_IN_ISINF_SIGN: case BUILT_IN_ISNAN: case BUILT_IN_ISNORMAL: case BUILT_IN_FPCLASSIFY: type_generic_remove_excess_precision = true; break; case BUILT_IN_ADD_OVERFLOW_P: case BUILT_IN_SUB_OVERFLOW_P: case BUILT_IN_MUL_OVERFLOW_P: /* The last argument of these type-generic builtins should not be promoted. */ type_generic_overflow_p = true; break; default: break; } } /* Scan the given expressions and types, producing individual converted arguments. */ for (typetail = typelist, parmnum = 0; values && values->iterate (parmnum, &val); ++parmnum) { tree type = typetail ? TREE_VALUE (typetail) : 0; tree valtype = TREE_TYPE (val); tree rname = function; int argnum = parmnum + 1; const char *invalid_func_diag; bool excess_precision = false; bool npc; tree parmval; /* Some __atomic_* builtins have additional hidden argument at position 0. */ location_t ploc = !arg_loc.is_empty () && values->length () == arg_loc.length () ? expansion_point_location_if_in_system_header (arg_loc[parmnum]) : input_location; if (type == void_type_node) { if (selector) error_at (loc, "too many arguments to method %qE", selector); else error_at (loc, "too many arguments to function %qE", function); inform_declaration (fundecl); return error_args ? -1 : (int) parmnum; } if (selector && argnum > 2) { rname = selector; argnum -= 2; } npc = null_pointer_constant_p (val); /* If there is excess precision and a prototype, convert once to the required type rather than converting via the semantic type. Likewise without a prototype a float value represented as long double should be converted once to double. But for type-generic classification functions excess precision must be removed here. */ if (TREE_CODE (val) == EXCESS_PRECISION_EXPR && (type || !type_generic || !type_generic_remove_excess_precision)) { val = TREE_OPERAND (val, 0); excess_precision = true; } val = c_fully_fold (val, false, NULL); STRIP_TYPE_NOPS (val); val = require_complete_type (ploc, val); /* Some floating-point arguments must be promoted to double when no type is specified by a prototype. This applies to arguments of type float, and to architecture-specific types (ARM __fp16), but not to _FloatN or _FloatNx types. */ bool promote_float_arg = false; if (type == NULL_TREE && TREE_CODE (valtype) == REAL_TYPE && (TYPE_PRECISION (valtype) <= TYPE_PRECISION (double_type_node)) && TYPE_MAIN_VARIANT (valtype) != double_type_node && TYPE_MAIN_VARIANT (valtype) != long_double_type_node && !DECIMAL_FLOAT_MODE_P (TYPE_MODE (valtype))) { /* Promote this argument, unless it has a _FloatN or _FloatNx type. */ promote_float_arg = true; for (int i = 0; i < NUM_FLOATN_NX_TYPES; i++) if (TYPE_MAIN_VARIANT (valtype) == FLOATN_NX_TYPE_NODE (i)) { promote_float_arg = false; break; } } if (type != NULL_TREE) { /* Formal parm type is specified by a function prototype. 
*/ if (type == error_mark_node || !COMPLETE_TYPE_P (type)) { error_at (ploc, "type of formal parameter %d is incomplete", parmnum + 1); parmval = val; } else { tree origtype; /* Optionally warn about conversions that differ from the default conversions. */ if (warn_traditional_conversion || warn_traditional) { unsigned int formal_prec = TYPE_PRECISION (type); if (INTEGRAL_TYPE_P (type) && TREE_CODE (valtype) == REAL_TYPE) warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE as integer rather " "than floating due to prototype", argnum, rname); if (INTEGRAL_TYPE_P (type) && TREE_CODE (valtype) == COMPLEX_TYPE) warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE as integer rather " "than complex due to prototype", argnum, rname); else if (TREE_CODE (type) == COMPLEX_TYPE && TREE_CODE (valtype) == REAL_TYPE) warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE as complex rather " "than floating due to prototype", argnum, rname); else if (TREE_CODE (type) == REAL_TYPE && INTEGRAL_TYPE_P (valtype)) warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE as floating rather " "than integer due to prototype", argnum, rname); else if (TREE_CODE (type) == COMPLEX_TYPE && INTEGRAL_TYPE_P (valtype)) warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE as complex rather " "than integer due to prototype", argnum, rname); else if (TREE_CODE (type) == REAL_TYPE && TREE_CODE (valtype) == COMPLEX_TYPE) warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE as floating rather " "than complex due to prototype", argnum, rname); /* ??? At some point, messages should be written about conversions between complex types, but that's too messy to do now. */ else if (TREE_CODE (type) == REAL_TYPE && TREE_CODE (valtype) == REAL_TYPE) { /* Warn if any argument is passed as `float', since without a prototype it would be `double'. */ if (formal_prec == TYPE_PRECISION (float_type_node) && type != dfloat32_type_node) warning_at (ploc, 0, "passing argument %d of %qE as %<float%> " "rather than %<double%> due to prototype", argnum, rname); /* Warn if mismatch between argument and prototype for decimal float types. Warn of conversions with binary float types and of precision narrowing due to prototype. */ else if (type != valtype && (type == dfloat32_type_node || type == dfloat64_type_node || type == dfloat128_type_node || valtype == dfloat32_type_node || valtype == dfloat64_type_node || valtype == dfloat128_type_node) && (formal_prec <= TYPE_PRECISION (valtype) || (type == dfloat128_type_node && (valtype != dfloat64_type_node && (valtype != dfloat32_type_node))) || (type == dfloat64_type_node && (valtype != dfloat32_type_node)))) warning_at (ploc, 0, "passing argument %d of %qE as %qT " "rather than %qT due to prototype", argnum, rname, type, valtype); } /* Detect integer changing in width or signedness. These warnings are only activated with -Wtraditional-conversion, not with -Wtraditional. */ else if (warn_traditional_conversion && INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (valtype)) { tree would_have_been = default_conversion (val); tree type1 = TREE_TYPE (would_have_been); if (val == error_mark_node) /* VAL could have been of incomplete type. */; else if (TREE_CODE (type) == ENUMERAL_TYPE && (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (valtype))) /* No warning if function asks for enum and the actual arg is that enum type. 
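   (Illustratively: given enum E { A }; and void f (enum E);, the call
   f (A) is left alone here even though default_conversion would have
   produced an int.)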
*/ ; else if (formal_prec != TYPE_PRECISION (type1)) warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE " "with different width due to prototype", argnum, rname); else if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (type1)) ; /* Don't complain if the formal parameter type is an enum, because we can't tell now whether the value was an enum--even the same enum. */ else if (TREE_CODE (type) == ENUMERAL_TYPE) ; else if (TREE_CODE (val) == INTEGER_CST && int_fits_type_p (val, type)) /* Change in signedness doesn't matter if a constant value is unaffected. */ ; /* If the value is extended from a narrower unsigned type, it doesn't matter whether we pass it as signed or unsigned; the value certainly is the same either way. */ else if (TYPE_PRECISION (valtype) < TYPE_PRECISION (type) && TYPE_UNSIGNED (valtype)) ; else if (TYPE_UNSIGNED (type)) warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE " "as unsigned due to prototype", argnum, rname); else warning_at (ploc, OPT_Wtraditional_conversion, "passing argument %d of %qE " "as signed due to prototype", argnum, rname); } } /* Possibly restore an EXCESS_PRECISION_EXPR for the sake of better warnings from convert_and_check. */ if (excess_precision) val = build1 (EXCESS_PRECISION_EXPR, valtype, val); origtype = (!origtypes) ? NULL_TREE : (*origtypes)[parmnum]; parmval = convert_for_assignment (loc, ploc, type, val, origtype, ic_argpass, npc, fundecl, function, parmnum + 1); if (targetm.calls.promote_prototypes (fundecl ? TREE_TYPE (fundecl) : 0) && INTEGRAL_TYPE_P (type) && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))) parmval = default_conversion (parmval); } } else if (promote_float_arg) { if (type_generic) parmval = val; else { /* Convert `float' to `double'. */ if (warn_double_promotion && !c_inhibit_evaluation_warnings) warning_at (ploc, OPT_Wdouble_promotion, "implicit conversion from %qT to %qT when passing " "argument to function", valtype, double_type_node); parmval = convert (double_type_node, val); } } else if ((excess_precision && !type_generic) || (type_generic_overflow_p && parmnum == 2)) /* A "double" argument with excess precision being passed without a prototype or in variable arguments. The last argument of __builtin_*_overflow_p should not be promoted. */ parmval = convert (valtype, val); else if ((invalid_func_diag = targetm.calls.invalid_arg_for_unprototyped_fn (typelist, fundecl, val))) { error (invalid_func_diag); return -1; } else if (TREE_CODE (val) == ADDR_EXPR && reject_gcc_builtin (val)) { return -1; } else /* Convert `short' and `char' to full-size `int'. */ parmval = default_conversion (val); (*values)[parmnum] = parmval; if (parmval == error_mark_node) error_args = true; if (typetail) typetail = TREE_CHAIN (typetail); } gcc_assert (parmnum == vec_safe_length (values)); if (typetail != NULL_TREE && TREE_VALUE (typetail) != void_type_node) { error_at (loc, "too few arguments to function %qE", function); inform_declaration (fundecl); return -1; } return error_args ? -1 : (int) parmnum; } /* This is the entry point used by the parser to build unary operators in the input. CODE, a tree_code, specifies the unary operator, and ARG is the operand. For unary plus, the C parser currently uses CONVERT_EXPR for code. LOC is the location to use for the tree generated. 
*/ struct c_expr parser_build_unary_op (location_t loc, enum tree_code code, struct c_expr arg) { struct c_expr result; result.original_code = code; result.original_type = NULL; if (reject_gcc_builtin (arg.value)) { result.value = error_mark_node; } else { result.value = build_unary_op (loc, code, arg.value, false); if (TREE_OVERFLOW_P (result.value) && !TREE_OVERFLOW_P (arg.value)) overflow_warning (loc, result.value, arg.value); } /* We are typically called when parsing a prefix token at LOC acting on ARG. Reflect this by updating the source range of the result to start at LOC and end at the end of ARG. */ set_c_expr_source_range (&result, loc, arg.get_finish ()); return result; } /* Returns true if TYPE is a character type, *not* including wchar_t. */ static bool char_type_p (tree type) { return (type == char_type_node || type == unsigned_char_type_node || type == signed_char_type_node || type == char16_type_node || type == char32_type_node); } /* This is the entry point used by the parser to build binary operators in the input. CODE, a tree_code, specifies the binary operator, and ARG1 and ARG2 are the operands. In addition to constructing the expression, we check for operands that were written with other binary operators in a way that is likely to confuse the user. LOCATION is the location of the binary operator. */ struct c_expr parser_build_binary_op (location_t location, enum tree_code code, struct c_expr arg1, struct c_expr arg2) { struct c_expr result; enum tree_code code1 = arg1.original_code; enum tree_code code2 = arg2.original_code; tree type1 = (arg1.original_type ? arg1.original_type : TREE_TYPE (arg1.value)); tree type2 = (arg2.original_type ? arg2.original_type : TREE_TYPE (arg2.value)); result.value = build_binary_op (location, code, arg1.value, arg2.value, true); result.original_code = code; result.original_type = NULL; if (TREE_CODE (result.value) == ERROR_MARK) { set_c_expr_source_range (&result, arg1.get_start (), arg2.get_finish ()); return result; } if (location != UNKNOWN_LOCATION) protected_set_expr_location (result.value, location); set_c_expr_source_range (&result, arg1.get_start (), arg2.get_finish ()); /* Check for cases such as x+y<<z which users are likely to misinterpret. */ if (warn_parentheses) warn_about_parentheses (location, code, code1, arg1.value, code2, arg2.value); if (warn_logical_op) warn_logical_operator (location, code, TREE_TYPE (result.value), code1, arg1.value, code2, arg2.value); if (warn_tautological_compare) { tree lhs = arg1.value; tree rhs = arg2.value; if (TREE_CODE (lhs) == C_MAYBE_CONST_EXPR) { if (C_MAYBE_CONST_EXPR_PRE (lhs) != NULL_TREE && TREE_SIDE_EFFECTS (C_MAYBE_CONST_EXPR_PRE (lhs))) lhs = NULL_TREE; else lhs = C_MAYBE_CONST_EXPR_EXPR (lhs); } if (TREE_CODE (rhs) == C_MAYBE_CONST_EXPR) { if (C_MAYBE_CONST_EXPR_PRE (rhs) != NULL_TREE && TREE_SIDE_EFFECTS (C_MAYBE_CONST_EXPR_PRE (rhs))) rhs = NULL_TREE; else rhs = C_MAYBE_CONST_EXPR_EXPR (rhs); } if (lhs != NULL_TREE && rhs != NULL_TREE) warn_tautological_cmp (location, code, lhs, rhs); } if (warn_logical_not_paren && TREE_CODE_CLASS (code) == tcc_comparison && code1 == TRUTH_NOT_EXPR && code2 != TRUTH_NOT_EXPR /* Avoid warning for !!x == y. */ && (TREE_CODE (arg1.value) != NE_EXPR || !integer_zerop (TREE_OPERAND (arg1.value, 1)))) { /* Avoid warning for !b == y where b has _Bool type. 
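   (Sketch of why this is needed: for _Bool b, the front end represents
   !b as an EQ_EXPR against zero in integer_type_node, so without the
   check below, -Wlogical-not-parentheses would fire spuriously on
   !b == 1.)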
*/ tree t = integer_zero_node; if (TREE_CODE (arg1.value) == EQ_EXPR && integer_zerop (TREE_OPERAND (arg1.value, 1)) && TREE_TYPE (TREE_OPERAND (arg1.value, 0)) == integer_type_node) { t = TREE_OPERAND (arg1.value, 0); do { if (TREE_TYPE (t) != integer_type_node) break; if (TREE_CODE (t) == C_MAYBE_CONST_EXPR) t = C_MAYBE_CONST_EXPR_EXPR (t); else if (CONVERT_EXPR_P (t)) t = TREE_OPERAND (t, 0); else break; } while (1); } if (TREE_CODE (TREE_TYPE (t)) != BOOLEAN_TYPE) warn_logical_not_parentheses (location, code, arg1.value, arg2.value); } /* Warn about comparisons against string literals, with the exception of testing for equality or inequality of a string literal with NULL. */ if (code == EQ_EXPR || code == NE_EXPR) { if ((code1 == STRING_CST && !integer_zerop (tree_strip_nop_conversions (arg2.value))) || (code2 == STRING_CST && !integer_zerop (tree_strip_nop_conversions (arg1.value)))) warning_at (location, OPT_Waddress, "comparison with string literal results in unspecified behavior"); /* Warn for ptr == '\0', it's likely that it should've been ptr[0]. */ if (POINTER_TYPE_P (type1) && null_pointer_constant_p (arg2.value) && char_type_p (type2) && warning_at (location, OPT_Wpointer_compare, "comparison between pointer and zero character " "constant")) inform (arg1.get_start (), "did you mean to dereference the pointer?"); else if (POINTER_TYPE_P (type2) && null_pointer_constant_p (arg1.value) && char_type_p (type1) && warning_at (location, OPT_Wpointer_compare, "comparison between pointer and zero character " "constant")) inform (arg2.get_start (), "did you mean to dereference the pointer?"); } else if (TREE_CODE_CLASS (code) == tcc_comparison && (code1 == STRING_CST || code2 == STRING_CST)) warning_at (location, OPT_Waddress, "comparison with string literal results in unspecified behavior"); if (TREE_OVERFLOW_P (result.value) && !TREE_OVERFLOW_P (arg1.value) && !TREE_OVERFLOW_P (arg2.value)) overflow_warning (location, result.value); /* Warn about comparisons of different enum types. */ if (warn_enum_compare && TREE_CODE_CLASS (code) == tcc_comparison && TREE_CODE (type1) == ENUMERAL_TYPE && TREE_CODE (type2) == ENUMERAL_TYPE && TYPE_MAIN_VARIANT (type1) != TYPE_MAIN_VARIANT (type2)) warning_at (location, OPT_Wenum_compare, "comparison between %qT and %qT", type1, type2); return result; } /* Return a tree for the difference of pointers OP0 and OP1. The resulting tree has type ptrdiff_t. If POINTER_SUBTRACT sanitization is enabled, assign to INSTRUMENT_EXPR call to libsanitizer. */ static tree pointer_diff (location_t loc, tree op0, tree op1, tree *instrument_expr) { tree restype = ptrdiff_type_node; tree result, inttype; addr_space_t as0 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (op0))); addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (op1))); tree target_type = TREE_TYPE (TREE_TYPE (op0)); tree orig_op1 = op1; /* If the operands point into different address spaces, we need to explicitly convert them to pointers into the common address space before we can subtract the numerical address values. */ if (as0 != as1) { addr_space_t as_common; tree common_type; /* Determine the common superset address space. This is guaranteed to exist because the caller verified that comp_target_types returned non-zero. */ if (!addr_space_superset (as0, as1, &as_common)) gcc_unreachable (); common_type = common_pointer_type (TREE_TYPE (op0), TREE_TYPE (op1)); op0 = convert (common_type, op0); op1 = convert (common_type, op1); } /* Determine integer type result of the subtraction. 
   This will usually be the same as the result type (ptrdiff_t), but
   may need to be a wider type if pointers for the address space are
   wider than ptrdiff_t.  */
  if (TYPE_PRECISION (restype) < TYPE_PRECISION (TREE_TYPE (op0)))
    inttype = c_common_type_for_size (TYPE_PRECISION (TREE_TYPE (op0)), 0);
  else
    inttype = restype;

  if (TREE_CODE (target_type) == VOID_TYPE)
    pedwarn (loc, OPT_Wpointer_arith,
             "pointer of type %<void *%> used in subtraction");
  if (TREE_CODE (target_type) == FUNCTION_TYPE)
    pedwarn (loc, OPT_Wpointer_arith,
             "pointer to a function used in subtraction");

  if (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT))
    {
      gcc_assert (current_function_decl != NULL_TREE);
      op0 = save_expr (op0);
      op1 = save_expr (op1);

      tree tt = builtin_decl_explicit (BUILT_IN_ASAN_POINTER_SUBTRACT);
      *instrument_expr = build_call_expr_loc (loc, tt, 2, op0, op1);
    }

  /* First do the subtraction, then build the divide operator
     and only convert at the very end.
     Do not do default conversions in case restype is a short type.  */

  /* POINTER_DIFF_EXPR requires a signed integer type of the same size as
     pointers.  If some platform cannot provide that, or has a larger
     ptrdiff_type to support differences larger than half the address
     space, cast the pointers to some larger integer type and do the
     computations in that type.  */
  if (TYPE_PRECISION (inttype) > TYPE_PRECISION (TREE_TYPE (op0)))
    op0 = build_binary_op (loc, MINUS_EXPR, convert (inttype, op0),
                           convert (inttype, op1), false);
  else
    {
      /* Cast away qualifiers.  */
      op0 = convert (c_common_type (TREE_TYPE (op0), TREE_TYPE (op0)), op0);
      op1 = convert (c_common_type (TREE_TYPE (op1), TREE_TYPE (op1)), op1);
      op0 = build2_loc (loc, POINTER_DIFF_EXPR, inttype, op0, op1);
    }

  /* This generates an error if op1 is pointer to incomplete type.  */
  if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (orig_op1))))
    error_at (loc, "arithmetic on pointer to an incomplete type");

  op1 = c_size_in_bytes (target_type);

  if (pointer_to_zero_sized_aggr_p (TREE_TYPE (orig_op1)))
    error_at (loc, "arithmetic on pointer to an empty aggregate");

  /* Divide by the size, in the easiest possible way.  */
  result = fold_build2_loc (loc, EXACT_DIV_EXPR, inttype,
                            op0, convert (inttype, op1));

  /* Convert to final result type if necessary.  */
  return convert (restype, result);
}

/* Expand atomic compound assignments into an appropriate sequence as
   specified by the C11 standard section 6.5.16.2.

       _Atomic T1 E1
       T2 E2
       E1 op= E2

   This sequence is used for all types for which these operations are
   supported.

   In addition, built-in versions of the 'fe' prefixed routines may
   need to be invoked for floating point (real, complex or vector) when
   floating-point exceptions are supported.  See 6.5.16.2 footnote 113.

       T1 newval;
       T1 old;
       T1 *addr;
       T2 val;
       fenv_t fenv;

       addr = &E1;
       val = (E2);
       __atomic_load (addr, &old, SEQ_CST);
       feholdexcept (&fenv);
     loop:
       newval = old op val;
       if (__atomic_compare_exchange_strong (addr, &old, &newval,
                                             SEQ_CST, SEQ_CST))
         goto done;
       feclearexcept (FE_ALL_EXCEPT);
       goto loop;
     done:
       feupdateenv (&fenv);

   The compiler will issue the __atomic_fetch_* built-in when possible,
   otherwise it will generate the generic form of the atomic operations.
   This requires temporaries whose addresses are taken.  The atomic
   processing is smart enough to figure out when the size of an object
   can utilize a lock-free version, and convert the built-in call to the
   appropriate lock-free routine.
The optimizers will then dispose of any temps that are no longer required, and lock-free implementations are utilized as long as there is target support for the required size. If the operator is NOP_EXPR, then this is a simple assignment, and an __atomic_store is issued to perform the assignment rather than the above loop. */ /* Build an atomic assignment at LOC, expanding into the proper sequence to store LHS MODIFYCODE= RHS. Return a value representing the result of the operation, unless RETURN_OLD_P, in which case return the old value of LHS (this is only for postincrement and postdecrement). */ static tree build_atomic_assign (location_t loc, tree lhs, enum tree_code modifycode, tree rhs, bool return_old_p) { tree fndecl, func_call; vec<tree, va_gc> *params; tree val, nonatomic_lhs_type, nonatomic_rhs_type, newval, newval_addr; tree old, old_addr; tree compound_stmt; tree stmt, goto_stmt; tree loop_label, loop_decl, done_label, done_decl; tree lhs_type = TREE_TYPE (lhs); tree lhs_addr = build_unary_op (loc, ADDR_EXPR, lhs, false); tree seq_cst = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST); tree rhs_semantic_type = TREE_TYPE (rhs); tree nonatomic_rhs_semantic_type; tree rhs_type; gcc_assert (TYPE_ATOMIC (lhs_type)); if (return_old_p) gcc_assert (modifycode == PLUS_EXPR || modifycode == MINUS_EXPR); /* Allocate enough vector items for a compare_exchange. */ vec_alloc (params, 6); /* Create a compound statement to hold the sequence of statements with a loop. */ compound_stmt = c_begin_compound_stmt (false); /* Remove any excess precision (which is only present here in the case of compound assignments). */ if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR) { gcc_assert (modifycode != NOP_EXPR); rhs = TREE_OPERAND (rhs, 0); } rhs_type = TREE_TYPE (rhs); /* Fold the RHS if it hasn't already been folded. */ if (modifycode != NOP_EXPR) rhs = c_fully_fold (rhs, false, NULL); /* Remove the qualifiers for the rest of the expressions and create the VAL temp variable to hold the RHS. */ nonatomic_lhs_type = build_qualified_type (lhs_type, TYPE_UNQUALIFIED); nonatomic_rhs_type = build_qualified_type (rhs_type, TYPE_UNQUALIFIED); nonatomic_rhs_semantic_type = build_qualified_type (rhs_semantic_type, TYPE_UNQUALIFIED); val = create_tmp_var_raw (nonatomic_rhs_type); TREE_ADDRESSABLE (val) = 1; TREE_NO_WARNING (val) = 1; rhs = build4 (TARGET_EXPR, nonatomic_rhs_type, val, rhs, NULL_TREE, NULL_TREE); SET_EXPR_LOCATION (rhs, loc); add_stmt (rhs); /* NOP_EXPR indicates it's a straight store of the RHS. Simply issue an atomic_store. */ if (modifycode == NOP_EXPR) { /* Build __atomic_store (&lhs, &val, SEQ_CST) */ rhs = build_unary_op (loc, ADDR_EXPR, val, false); fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_STORE); params->quick_push (lhs_addr); params->quick_push (rhs); params->quick_push (seq_cst); func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL); add_stmt (func_call); /* Finish the compound statement. */ compound_stmt = c_end_compound_stmt (loc, compound_stmt, false); /* VAL is the value which was stored, return a COMPOUND_STMT of the statement and that value. */ return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt, val); } /* Attempt to implement the atomic operation as an __atomic_fetch_* or __atomic_*_fetch built-in rather than a CAS loop. atomic_bool type isn't applicable for such builtins. ??? Do we want to handle enums? 
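For illustration: with this mapping a postfix i++ on an _Atomic int becomes roughly __atomic_fetch_add (&i, 1, SEQ_CST), whose result is the old value, while a prefix ++i uses __atomic_add_fetch (&i, 1, SEQ_CST), whose result is the new value.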
*/ if ((TREE_CODE (lhs_type) == INTEGER_TYPE || POINTER_TYPE_P (lhs_type)) && TREE_CODE (rhs_type) == INTEGER_TYPE) { built_in_function fncode; switch (modifycode) { case PLUS_EXPR: case POINTER_PLUS_EXPR: fncode = (return_old_p ? BUILT_IN_ATOMIC_FETCH_ADD_N : BUILT_IN_ATOMIC_ADD_FETCH_N); break; case MINUS_EXPR: fncode = (return_old_p ? BUILT_IN_ATOMIC_FETCH_SUB_N : BUILT_IN_ATOMIC_SUB_FETCH_N); break; case BIT_AND_EXPR: fncode = (return_old_p ? BUILT_IN_ATOMIC_FETCH_AND_N : BUILT_IN_ATOMIC_AND_FETCH_N); break; case BIT_IOR_EXPR: fncode = (return_old_p ? BUILT_IN_ATOMIC_FETCH_OR_N : BUILT_IN_ATOMIC_OR_FETCH_N); break; case BIT_XOR_EXPR: fncode = (return_old_p ? BUILT_IN_ATOMIC_FETCH_XOR_N : BUILT_IN_ATOMIC_XOR_FETCH_N); break; default: goto cas_loop; } /* We can only use "_1" through "_16" variants of the atomic fetch built-ins. */ unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE_UNIT (lhs_type)); if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) goto cas_loop; /* If this is a pointer type, we need to multiply by the size of the pointer target type. */ if (POINTER_TYPE_P (lhs_type)) { if (!COMPLETE_TYPE_P (TREE_TYPE (lhs_type)) /* ??? This would introduce -Wdiscarded-qualifiers warning: __atomic_fetch_* expect volatile void * type as the first argument. (Assignments between atomic and non-atomic objects are OK.) */ || TYPE_RESTRICT (lhs_type)) goto cas_loop; tree sz = TYPE_SIZE_UNIT (TREE_TYPE (lhs_type)); rhs = fold_build2_loc (loc, MULT_EXPR, ptrdiff_type_node, convert (ptrdiff_type_node, rhs), convert (ptrdiff_type_node, sz)); } /* Build __atomic_fetch_* (&lhs, &val, SEQ_CST), or __atomic_*_fetch (&lhs, &val, SEQ_CST). */ fndecl = builtin_decl_explicit (fncode); params->quick_push (lhs_addr); params->quick_push (rhs); params->quick_push (seq_cst); func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL); newval = create_tmp_var_raw (nonatomic_lhs_type); TREE_ADDRESSABLE (newval) = 1; TREE_NO_WARNING (newval) = 1; rhs = build4 (TARGET_EXPR, nonatomic_lhs_type, newval, func_call, NULL_TREE, NULL_TREE); SET_EXPR_LOCATION (rhs, loc); add_stmt (rhs); /* Finish the compound statement. */ compound_stmt = c_end_compound_stmt (loc, compound_stmt, false); /* NEWVAL is the value which was stored, return a COMPOUND_STMT of the statement and that value. */ return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt, newval); } cas_loop: /* Create the variables and labels required for the op= form. */ old = create_tmp_var_raw (nonatomic_lhs_type); old_addr = build_unary_op (loc, ADDR_EXPR, old, false); TREE_ADDRESSABLE (old) = 1; TREE_NO_WARNING (old) = 1; newval = create_tmp_var_raw (nonatomic_lhs_type); newval_addr = build_unary_op (loc, ADDR_EXPR, newval, false); TREE_ADDRESSABLE (newval) = 1; TREE_NO_WARNING (newval) = 1; loop_decl = create_artificial_label (loc); loop_label = build1 (LABEL_EXPR, void_type_node, loop_decl); done_decl = create_artificial_label (loc); done_label = build1 (LABEL_EXPR, void_type_node, done_decl); /* __atomic_load (addr, &old, SEQ_CST). */ fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD); params->quick_push (lhs_addr); params->quick_push (old_addr); params->quick_push (seq_cst); func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL); old = build4 (TARGET_EXPR, nonatomic_lhs_type, old, func_call, NULL_TREE, NULL_TREE); add_stmt (old); params->truncate (0); /* Create the expressions for floating-point environment manipulation, if required. 
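For example, for _Atomic double d; d += x; a failed compare-exchange discards the computed sum, so exception flags such as FE_INEXACT or FE_OVERFLOW raised by the discarded addition must not remain visible; that is what the feholdexcept / feclearexcept / feupdateenv pattern sketched above guarantees.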
*/ bool need_fenv = (flag_trapping_math && (FLOAT_TYPE_P (lhs_type) || FLOAT_TYPE_P (rhs_type))); tree hold_call = NULL_TREE, clear_call = NULL_TREE, update_call = NULL_TREE; if (need_fenv) targetm.atomic_assign_expand_fenv (&hold_call, &clear_call, &update_call); if (hold_call) add_stmt (hold_call); /* loop: */ add_stmt (loop_label); /* newval = old + val; */ if (rhs_type != rhs_semantic_type) val = build1 (EXCESS_PRECISION_EXPR, nonatomic_rhs_semantic_type, val); rhs = build_binary_op (loc, modifycode, old, val, true); if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR) { tree eptype = TREE_TYPE (rhs); rhs = c_fully_fold (TREE_OPERAND (rhs, 0), false, NULL); rhs = build1 (EXCESS_PRECISION_EXPR, eptype, rhs); } else rhs = c_fully_fold (rhs, false, NULL); rhs = convert_for_assignment (loc, UNKNOWN_LOCATION, nonatomic_lhs_type, rhs, NULL_TREE, ic_assign, false, NULL_TREE, NULL_TREE, 0); if (rhs != error_mark_node) { rhs = build4 (TARGET_EXPR, nonatomic_lhs_type, newval, rhs, NULL_TREE, NULL_TREE); SET_EXPR_LOCATION (rhs, loc); add_stmt (rhs); } /* if (__atomic_compare_exchange (addr, &old, &new, false, SEQ_CST, SEQ_CST)) goto done; */ fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_COMPARE_EXCHANGE); params->quick_push (lhs_addr); params->quick_push (old_addr); params->quick_push (newval_addr); params->quick_push (integer_zero_node); params->quick_push (seq_cst); params->quick_push (seq_cst); func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL); goto_stmt = build1 (GOTO_EXPR, void_type_node, done_decl); SET_EXPR_LOCATION (goto_stmt, loc); stmt = build3 (COND_EXPR, void_type_node, func_call, goto_stmt, NULL_TREE); SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); if (clear_call) add_stmt (clear_call); /* goto loop; */ goto_stmt = build1 (GOTO_EXPR, void_type_node, loop_decl); SET_EXPR_LOCATION (goto_stmt, loc); add_stmt (goto_stmt); /* done: */ add_stmt (done_label); if (update_call) add_stmt (update_call); /* Finish the compound statement. */ compound_stmt = c_end_compound_stmt (loc, compound_stmt, false); /* NEWVAL is the value that was successfully stored, return a COMPOUND_EXPR of the statement and the appropriate value. */ return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt, return_old_p ? old : newval); } /* Construct and perhaps optimize a tree representation for a unary operation. CODE, a tree_code, specifies the operation and XARG is the operand. For any CODE other than ADDR_EXPR, NOCONVERT suppresses the default promotions (such as from short to int). For ADDR_EXPR, the default promotions are not applied; NOCONVERT allows non-lvalues; this is only used to handle conversion of non-lvalue arrays to pointers in C99. LOCATION is the location of the operator. */ tree build_unary_op (location_t location, enum tree_code code, tree xarg, bool noconvert) { /* No default_conversion here. It causes trouble for ADDR_EXPR. 
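For instance, an array operand must reach the ADDR_EXPR case below as the array lvalue itself; the array-to-pointer decay performed by default_conversion would leave only a pointer rvalue whose address cannot be taken.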
*/ tree arg = xarg; tree argtype = NULL_TREE; enum tree_code typecode; tree val; tree ret = error_mark_node; tree eptype = NULL_TREE; const char *invalid_op_diag; bool int_operands; int_operands = EXPR_INT_CONST_OPERANDS (xarg); if (int_operands) arg = remove_c_maybe_const_expr (arg); if (code != ADDR_EXPR) arg = require_complete_type (location, arg); typecode = TREE_CODE (TREE_TYPE (arg)); if (typecode == ERROR_MARK) return error_mark_node; if (typecode == ENUMERAL_TYPE || typecode == BOOLEAN_TYPE) typecode = INTEGER_TYPE; if ((invalid_op_diag = targetm.invalid_unary_op (code, TREE_TYPE (xarg)))) { error_at (location, invalid_op_diag); return error_mark_node; } if (TREE_CODE (arg) == EXCESS_PRECISION_EXPR) { eptype = TREE_TYPE (arg); arg = TREE_OPERAND (arg, 0); } switch (code) { case CONVERT_EXPR: /* This is used for unary plus, because a CONVERT_EXPR is enough to prevent anybody from looking inside for associativity, but won't generate any code. */ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE || typecode == FIXED_POINT_TYPE || typecode == COMPLEX_TYPE || typecode == VECTOR_TYPE)) { error_at (location, "wrong type argument to unary plus"); return error_mark_node; } else if (!noconvert) arg = default_conversion (arg); arg = non_lvalue_loc (location, arg); break; case NEGATE_EXPR: if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE || typecode == FIXED_POINT_TYPE || typecode == COMPLEX_TYPE || typecode == VECTOR_TYPE)) { error_at (location, "wrong type argument to unary minus"); return error_mark_node; } else if (!noconvert) arg = default_conversion (arg); break; case BIT_NOT_EXPR: /* ~ works on integer types and non float vectors. */ if (typecode == INTEGER_TYPE || (typecode == VECTOR_TYPE && !VECTOR_FLOAT_TYPE_P (TREE_TYPE (arg)))) { tree e = arg; /* Warn if the expression has boolean value. */ while (TREE_CODE (e) == COMPOUND_EXPR) e = TREE_OPERAND (e, 1); if ((TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE || truth_value_p (TREE_CODE (e))) && warning_at (location, OPT_Wbool_operation, "%<~%> on a boolean expression")) { gcc_rich_location richloc (location); richloc.add_fixit_insert_before (location, "!"); inform (&richloc, "did you mean to use logical not?"); } if (!noconvert) arg = default_conversion (arg); } else if (typecode == COMPLEX_TYPE) { code = CONJ_EXPR; pedwarn (location, OPT_Wpedantic, "ISO C does not support %<~%> for complex conjugation"); if (!noconvert) arg = default_conversion (arg); } else { error_at (location, "wrong type argument to bit-complement"); return error_mark_node; } break; case ABS_EXPR: if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE)) { error_at (location, "wrong type argument to abs"); return error_mark_node; } else if (!noconvert) arg = default_conversion (arg); break; case CONJ_EXPR: /* Conjugating a real value is a no-op, but allow it anyway. 
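For a real operand the conjugate of x is simply x, while for _Complex operands the sign of the imaginary part is flipped, so only the type check below is needed.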
*/ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE || typecode == COMPLEX_TYPE)) { error_at (location, "wrong type argument to conjugation"); return error_mark_node; } else if (!noconvert) arg = default_conversion (arg); break; case TRUTH_NOT_EXPR: if (typecode != INTEGER_TYPE && typecode != FIXED_POINT_TYPE && typecode != REAL_TYPE && typecode != POINTER_TYPE && typecode != COMPLEX_TYPE) { error_at (location, "wrong type argument to unary exclamation mark"); return error_mark_node; } if (int_operands) { arg = c_objc_common_truthvalue_conversion (location, xarg); arg = remove_c_maybe_const_expr (arg); } else arg = c_objc_common_truthvalue_conversion (location, arg); ret = invert_truthvalue_loc (location, arg); /* If the TRUTH_NOT_EXPR has been folded, reset the location. */ if (EXPR_P (ret) && EXPR_HAS_LOCATION (ret)) location = EXPR_LOCATION (ret); goto return_build_unary_op; case REALPART_EXPR: case IMAGPART_EXPR: ret = build_real_imag_expr (location, code, arg); if (ret == error_mark_node) return error_mark_node; if (eptype && TREE_CODE (eptype) == COMPLEX_TYPE) eptype = TREE_TYPE (eptype); goto return_build_unary_op; case PREINCREMENT_EXPR: case POSTINCREMENT_EXPR: case PREDECREMENT_EXPR: case POSTDECREMENT_EXPR: if (TREE_CODE (arg) == C_MAYBE_CONST_EXPR) { tree inner = build_unary_op (location, code, C_MAYBE_CONST_EXPR_EXPR (arg), noconvert); if (inner == error_mark_node) return error_mark_node; ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner), C_MAYBE_CONST_EXPR_PRE (arg), inner); gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (arg)); C_MAYBE_CONST_EXPR_NON_CONST (ret) = 1; goto return_build_unary_op; } /* Complain about anything that is not a true lvalue. In Objective-C, skip this check for property_refs. */ if (!objc_is_property_ref (arg) && !lvalue_or_else (location, arg, ((code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) ? lv_increment : lv_decrement))) return error_mark_node; if (warn_cxx_compat && TREE_CODE (TREE_TYPE (arg)) == ENUMERAL_TYPE) { if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) warning_at (location, OPT_Wc___compat, "increment of enumeration value is invalid in C++"); else warning_at (location, OPT_Wc___compat, "decrement of enumeration value is invalid in C++"); } if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE) { if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) warning_at (location, OPT_Wbool_operation, "increment of a boolean expression"); else warning_at (location, OPT_Wbool_operation, "decrement of a boolean expression"); } /* Ensure the argument is fully folded inside any SAVE_EXPR. */ arg = c_fully_fold (arg, false, NULL, true); bool atomic_op; atomic_op = really_atomic_lvalue (arg); /* Increment or decrement the real part of the value, and don't change the imaginary part. */ if (typecode == COMPLEX_TYPE) { tree real, imag; pedwarn (location, OPT_Wpedantic, "ISO C does not support %<++%> and %<--%> on complex types"); if (!atomic_op) { arg = stabilize_reference (arg); real = build_unary_op (EXPR_LOCATION (arg), REALPART_EXPR, arg, true); imag = build_unary_op (EXPR_LOCATION (arg), IMAGPART_EXPR, arg, true); real = build_unary_op (EXPR_LOCATION (arg), code, real, true); if (real == error_mark_node || imag == error_mark_node) return error_mark_node; ret = build2 (COMPLEX_EXPR, TREE_TYPE (arg), real, imag); goto return_build_unary_op; } } /* Report invalid types. 
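E.g. ++ and -- applied to a struct, a union, or a function designator all land here. */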
*/ if (typecode != POINTER_TYPE && typecode != FIXED_POINT_TYPE && typecode != INTEGER_TYPE && typecode != REAL_TYPE && typecode != COMPLEX_TYPE && typecode != VECTOR_TYPE) { if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) error_at (location, "wrong type argument to increment"); else error_at (location, "wrong type argument to decrement"); return error_mark_node; } { tree inc; argtype = TREE_TYPE (arg); /* Compute the increment. */ if (typecode == POINTER_TYPE) { /* If pointer target is an incomplete type, we just cannot know how to do the arithmetic. */ if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (argtype))) { if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) error_at (location, "increment of pointer to an incomplete type %qT", TREE_TYPE (argtype)); else error_at (location, "decrement of pointer to an incomplete type %qT", TREE_TYPE (argtype)); } else if (TREE_CODE (TREE_TYPE (argtype)) == FUNCTION_TYPE || TREE_CODE (TREE_TYPE (argtype)) == VOID_TYPE) { if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) pedwarn (location, OPT_Wpointer_arith, "wrong type argument to increment"); else pedwarn (location, OPT_Wpointer_arith, "wrong type argument to decrement"); } inc = c_size_in_bytes (TREE_TYPE (argtype)); inc = convert_to_ptrofftype_loc (location, inc); } else if (FRACT_MODE_P (TYPE_MODE (argtype))) { /* For signed fract types, we invert ++ to -- or -- to ++, and change inc from 1 to -1, because it is not possible to represent 1 in signed fract constants. For unsigned fract types, the result always overflows and we get an undefined (original) or the maximum value. */ if (code == PREINCREMENT_EXPR) code = PREDECREMENT_EXPR; else if (code == PREDECREMENT_EXPR) code = PREINCREMENT_EXPR; else if (code == POSTINCREMENT_EXPR) code = POSTDECREMENT_EXPR; else /* code == POSTDECREMENT_EXPR */ code = POSTINCREMENT_EXPR; inc = integer_minus_one_node; inc = convert (argtype, inc); } else { inc = VECTOR_TYPE_P (argtype) ? build_one_cst (argtype) : integer_one_node; inc = convert (argtype, inc); } /* If 'arg' is an Objective-C PROPERTY_REF expression, then we need to ask Objective-C to build the increment or decrement expression for it. */ if (objc_is_property_ref (arg)) return objc_build_incr_expr_for_property_ref (location, code, arg, inc); /* Report a read-only lvalue. */ if (TYPE_READONLY (argtype)) { readonly_error (location, arg, ((code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) ? lv_increment : lv_decrement)); return error_mark_node; } else if (TREE_READONLY (arg)) readonly_warning (arg, ((code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) ? lv_increment : lv_decrement)); /* If the argument is atomic, use the special code sequences for atomic compound assignment. */ if (atomic_op) { arg = stabilize_reference (arg); ret = build_atomic_assign (location, arg, ((code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) ? PLUS_EXPR : MINUS_EXPR), (FRACT_MODE_P (TYPE_MODE (argtype)) ? inc : integer_one_node), (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)); goto return_build_unary_op; } if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE) val = boolean_increment (code, arg); else val = build2 (code, TREE_TYPE (arg), arg, inc); TREE_SIDE_EFFECTS (val) = 1; if (TREE_CODE (val) != code) TREE_NO_WARNING (val) = 1; ret = val; goto return_build_unary_op; } case ADDR_EXPR: /* Note that this operation never does default_conversion. 
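For int a[10], this is what makes &a denote the whole array, with type int (*)[10], rather than the address of a decayed pointer value.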
*/ /* The operand of unary '&' must be an lvalue (which excludes expressions of type void), or, in C99, the result of a [] or unary '*' operator. */ if (VOID_TYPE_P (TREE_TYPE (arg)) && TYPE_QUALS (TREE_TYPE (arg)) == TYPE_UNQUALIFIED && (!INDIRECT_REF_P (arg) || !flag_isoc99)) pedwarn (location, 0, "taking address of expression of type %<void%>"); /* Let &* cancel out to simplify resulting code. */ if (INDIRECT_REF_P (arg)) { /* Don't let this be an lvalue. */ if (lvalue_p (TREE_OPERAND (arg, 0))) return non_lvalue_loc (location, TREE_OPERAND (arg, 0)); ret = TREE_OPERAND (arg, 0); goto return_build_unary_op; } /* Anything not already handled and not a true memory reference or a non-lvalue array is an error. */ if (typecode != FUNCTION_TYPE && !noconvert && !lvalue_or_else (location, arg, lv_addressof)) return error_mark_node; /* Move address operations inside C_MAYBE_CONST_EXPR to simplify folding later. */ if (TREE_CODE (arg) == C_MAYBE_CONST_EXPR) { tree inner = build_unary_op (location, code, C_MAYBE_CONST_EXPR_EXPR (arg), noconvert); ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner), C_MAYBE_CONST_EXPR_PRE (arg), inner); gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (arg)); C_MAYBE_CONST_EXPR_NON_CONST (ret) = C_MAYBE_CONST_EXPR_NON_CONST (arg); goto return_build_unary_op; } /* Ordinary case; arg is a COMPONENT_REF or a decl. */ argtype = TREE_TYPE (arg); /* If the lvalue is const or volatile, merge that into the type to which the address will point. This is only needed for function types. */ if ((DECL_P (arg) || REFERENCE_CLASS_P (arg)) && (TREE_READONLY (arg) || TREE_THIS_VOLATILE (arg)) && TREE_CODE (argtype) == FUNCTION_TYPE) { int orig_quals = TYPE_QUALS (strip_array_types (argtype)); int quals = orig_quals; if (TREE_READONLY (arg)) quals |= TYPE_QUAL_CONST; if (TREE_THIS_VOLATILE (arg)) quals |= TYPE_QUAL_VOLATILE; argtype = c_build_qualified_type (argtype, quals); } switch (TREE_CODE (arg)) { case COMPONENT_REF: if (DECL_C_BIT_FIELD (TREE_OPERAND (arg, 1))) { error_at (location, "cannot take address of bit-field %qD", TREE_OPERAND (arg, 1)); return error_mark_node; } /* fall through */ case ARRAY_REF: if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (arg, 0)))) { if (!AGGREGATE_TYPE_P (TREE_TYPE (arg)) && !VECTOR_TYPE_P (TREE_TYPE (arg))) { error_at (location, "cannot take address of scalar with " "reverse storage order"); return error_mark_node; } if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (arg))) warning_at (location, OPT_Wscalar_storage_order, "address of array with reverse scalar storage " "order requested"); } default: break; } if (!c_mark_addressable (arg)) return error_mark_node; gcc_assert (TREE_CODE (arg) != COMPONENT_REF || !DECL_C_BIT_FIELD (TREE_OPERAND (arg, 1))); argtype = build_pointer_type (argtype); /* ??? Cope with user tricks that amount to offsetof. Delete this when we have proper support for integer constant expressions. */ val = get_base_address (arg); if (val && INDIRECT_REF_P (val) && TREE_CONSTANT (TREE_OPERAND (val, 0))) { ret = fold_offsetof (arg, argtype); goto return_build_unary_op; } val = build1 (ADDR_EXPR, argtype, arg); ret = val; goto return_build_unary_op; default: gcc_unreachable (); } if (argtype == NULL_TREE) argtype = TREE_TYPE (arg); if (TREE_CODE (arg) == INTEGER_CST) ret = (require_constant_value ? 
fold_build1_initializer_loc (location, code, argtype, arg) : fold_build1_loc (location, code, argtype, arg)); else ret = build1 (code, argtype, arg); return_build_unary_op: gcc_assert (ret != error_mark_node); if (TREE_CODE (ret) == INTEGER_CST && !TREE_OVERFLOW (ret) && !(TREE_CODE (xarg) == INTEGER_CST && !TREE_OVERFLOW (xarg))) ret = build1 (NOP_EXPR, TREE_TYPE (ret), ret); else if (TREE_CODE (ret) != INTEGER_CST && int_operands) ret = note_integer_operands (ret); if (eptype) ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret); protected_set_expr_location (ret, location); return ret; } /* Return nonzero if REF is an lvalue valid for this language. Lvalues can be assigned, unless their type has TYPE_READONLY. Lvalues can have their address taken, unless they have C_DECL_REGISTER. */ bool lvalue_p (const_tree ref) { const enum tree_code code = TREE_CODE (ref); switch (code) { case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: return lvalue_p (TREE_OPERAND (ref, 0)); case C_MAYBE_CONST_EXPR: return lvalue_p (TREE_OPERAND (ref, 1)); case COMPOUND_LITERAL_EXPR: case STRING_CST: return true; case INDIRECT_REF: case ARRAY_REF: case VAR_DECL: case PARM_DECL: case RESULT_DECL: case ERROR_MARK: return (TREE_CODE (TREE_TYPE (ref)) != FUNCTION_TYPE && TREE_CODE (TREE_TYPE (ref)) != METHOD_TYPE); case BIND_EXPR: return TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE; default: return false; } } /* Give a warning for storing in something that is read-only in GCC terms but not const in ISO C terms. */ static void readonly_warning (tree arg, enum lvalue_use use) { switch (use) { case lv_assign: warning (0, "assignment of read-only location %qE", arg); break; case lv_increment: warning (0, "increment of read-only location %qE", arg); break; case lv_decrement: warning (0, "decrement of read-only location %qE", arg); break; default: gcc_unreachable (); } return; } /* Return nonzero if REF is an lvalue valid for this language; otherwise, print an error message and return zero. USE says how the lvalue is being used and so selects the error message. LOCATION is the location at which any error should be reported. */ static int lvalue_or_else (location_t loc, const_tree ref, enum lvalue_use use) { int win = lvalue_p (ref); if (!win) lvalue_error (loc, use); return win; } /* Mark EXP saying that we need to be able to take the address of it; it should not be allocated in a register. Returns true if successful. ARRAY_REF_P is true if this is for ARRAY_REF construction - in that case we don't want to look through VIEW_CONVERT_EXPR from VECTOR_TYPE to ARRAY_TYPE, it is fine to use ARRAY_REFs for vector subscripts on vector register variables. 
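For example, given register int r; taking &r must be diagnosed, which is one of the errors issued below.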
*/ bool c_mark_addressable (tree exp, bool array_ref_p) { tree x = exp; while (1) switch (TREE_CODE (x)) { case VIEW_CONVERT_EXPR: if (array_ref_p && TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (x, 0)))) return true; /* FALLTHRU */ case COMPONENT_REF: case ADDR_EXPR: case ARRAY_REF: case REALPART_EXPR: case IMAGPART_EXPR: x = TREE_OPERAND (x, 0); break; case COMPOUND_LITERAL_EXPR: case CONSTRUCTOR: TREE_ADDRESSABLE (x) = 1; return true; case VAR_DECL: case CONST_DECL: case PARM_DECL: case RESULT_DECL: if (C_DECL_REGISTER (x) && DECL_NONLOCAL (x)) { if (TREE_PUBLIC (x) || is_global_var (x)) { error ("global register variable %qD used in nested function", x); return false; } pedwarn (input_location, 0, "register variable %qD used in nested function", x); } else if (C_DECL_REGISTER (x)) { if (TREE_PUBLIC (x) || is_global_var (x)) error ("address of global register variable %qD requested", x); else error ("address of register variable %qD requested", x); return false; } /* FALLTHRU */ case FUNCTION_DECL: TREE_ADDRESSABLE (x) = 1; /* FALLTHRU */ default: return true; } } /* Convert EXPR to TYPE, warning about conversion problems with constants. SEMANTIC_TYPE is the type this conversion would use without excess precision. If SEMANTIC_TYPE is NULL, this function is equivalent to convert_and_check. This function is a wrapper that handles conversions that may be different than the usual ones because of excess precision. */ static tree ep_convert_and_check (location_t loc, tree type, tree expr, tree semantic_type) { if (TREE_TYPE (expr) == type) return expr; /* For C11, integer conversions may have results with excess precision. */ if (flag_isoc11 || !semantic_type) return convert_and_check (loc, type, expr); if (TREE_CODE (TREE_TYPE (expr)) == INTEGER_TYPE && TREE_TYPE (expr) != semantic_type) { /* For integers, we need to check the real conversion, not the conversion to the excess precision type. */ expr = convert_and_check (loc, semantic_type, expr); } /* Result type is the excess precision type, which should be large enough, so do not check. */ return convert (type, expr); } /* Build and return a conditional expression IFEXP ? OP1 : OP2. If IFEXP_BCP then the condition is a call to __builtin_constant_p, and if folded to an integer constant then the unselected half may contain arbitrary operations not normally permitted in constant expressions. Set the location of the expression to LOC. */ tree build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp, tree op1, tree op1_original_type, location_t op1_loc, tree op2, tree op2_original_type, location_t op2_loc) { tree type1; tree type2; enum tree_code code1; enum tree_code code2; tree result_type = NULL; tree semantic_result_type = NULL; tree orig_op1 = op1, orig_op2 = op2; bool int_const, op1_int_operands, op2_int_operands, int_operands; bool ifexp_int_operands; tree ret; op1_int_operands = EXPR_INT_CONST_OPERANDS (orig_op1); if (op1_int_operands) op1 = remove_c_maybe_const_expr (op1); op2_int_operands = EXPR_INT_CONST_OPERANDS (orig_op2); if (op2_int_operands) op2 = remove_c_maybe_const_expr (op2); ifexp_int_operands = EXPR_INT_CONST_OPERANDS (ifexp); if (ifexp_int_operands) ifexp = remove_c_maybe_const_expr (ifexp); /* Promote both alternatives. 
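E.g. in c ? (short) s : 1 the first arm is promoted to int by default_conversion before the common result type is computed.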
*/ if (TREE_CODE (TREE_TYPE (op1)) != VOID_TYPE) op1 = default_conversion (op1); if (TREE_CODE (TREE_TYPE (op2)) != VOID_TYPE) op2 = default_conversion (op2); if (TREE_CODE (ifexp) == ERROR_MARK || TREE_CODE (TREE_TYPE (op1)) == ERROR_MARK || TREE_CODE (TREE_TYPE (op2)) == ERROR_MARK) return error_mark_node; type1 = TREE_TYPE (op1); code1 = TREE_CODE (type1); type2 = TREE_TYPE (op2); code2 = TREE_CODE (type2); if (code1 == POINTER_TYPE && reject_gcc_builtin (op1)) return error_mark_node; if (code2 == POINTER_TYPE && reject_gcc_builtin (op2)) return error_mark_node; /* C90 does not permit non-lvalue arrays in conditional expressions. In C99 they will be pointers by now. */ if (code1 == ARRAY_TYPE || code2 == ARRAY_TYPE) { error_at (colon_loc, "non-lvalue array in conditional expression"); return error_mark_node; } if ((TREE_CODE (op1) == EXCESS_PRECISION_EXPR || TREE_CODE (op2) == EXCESS_PRECISION_EXPR) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE) && (code2 == INTEGER_TYPE || code2 == REAL_TYPE || code2 == COMPLEX_TYPE)) { semantic_result_type = c_common_type (type1, type2); if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR) { op1 = TREE_OPERAND (op1, 0); type1 = TREE_TYPE (op1); gcc_assert (TREE_CODE (type1) == code1); } if (TREE_CODE (op2) == EXCESS_PRECISION_EXPR) { op2 = TREE_OPERAND (op2, 0); type2 = TREE_TYPE (op2); gcc_assert (TREE_CODE (type2) == code2); } } if (warn_cxx_compat) { tree t1 = op1_original_type ? op1_original_type : TREE_TYPE (orig_op1); tree t2 = op2_original_type ? op2_original_type : TREE_TYPE (orig_op2); if (TREE_CODE (t1) == ENUMERAL_TYPE && TREE_CODE (t2) == ENUMERAL_TYPE && TYPE_MAIN_VARIANT (t1) != TYPE_MAIN_VARIANT (t2)) warning_at (colon_loc, OPT_Wc___compat, ("different enum types in conditional is " "invalid in C++: %qT vs %qT"), t1, t2); } /* Quickly detect the usual case where op1 and op2 have the same type after promotion. */ if (TYPE_MAIN_VARIANT (type1) == TYPE_MAIN_VARIANT (type2)) { if (type1 == type2) result_type = type1; else result_type = TYPE_MAIN_VARIANT (type1); } else if ((code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE) && (code2 == INTEGER_TYPE || code2 == REAL_TYPE || code2 == COMPLEX_TYPE)) { /* In C11, a conditional expression between a floating-point type and an integer type should convert the integer type to the evaluation format of the floating-point type, with possible excess precision. */ tree eptype1 = type1; tree eptype2 = type2; if (flag_isoc11) { tree eptype; if (ANY_INTEGRAL_TYPE_P (type1) && (eptype = excess_precision_type (type2)) != NULL_TREE) { eptype2 = eptype; if (!semantic_result_type) semantic_result_type = c_common_type (type1, type2); } else if (ANY_INTEGRAL_TYPE_P (type2) && (eptype = excess_precision_type (type1)) != NULL_TREE) { eptype1 = eptype; if (!semantic_result_type) semantic_result_type = c_common_type (type1, type2); } } result_type = c_common_type (eptype1, eptype2); if (result_type == error_mark_node) return error_mark_node; do_warn_double_promotion (result_type, type1, type2, "implicit conversion from %qT to %qT to " "match other result of conditional", colon_loc); /* If -Wsign-compare, warn here if type1 and type2 have different signedness. We'll promote the signed to unsigned and later code won't know it used to be different. Do this check on the original types, so that explicit casts will be considered, but default promotions won't. 
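A typical instance is c ? -1 : 0U, whose result type is unsigned int, so the -1 arm is silently converted to UINT_MAX.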
*/ if (c_inhibit_evaluation_warnings == 0) { int unsigned_op1 = TYPE_UNSIGNED (TREE_TYPE (orig_op1)); int unsigned_op2 = TYPE_UNSIGNED (TREE_TYPE (orig_op2)); if (unsigned_op1 ^ unsigned_op2) { bool ovf; /* Do not warn if the result type is signed, since the signed type will only be chosen if it can represent all the values of the unsigned type. */ if (!TYPE_UNSIGNED (result_type)) /* OK */; else { bool op1_maybe_const = true; bool op2_maybe_const = true; /* Do not warn if the signed quantity is an unsuffixed integer literal (or some static constant expression involving such literals) and it is non-negative. This warning requires the operands to be folded for best results, so do that folding in this case even without warn_sign_compare to avoid warning options possibly affecting code generation. */ c_inhibit_evaluation_warnings += (ifexp == truthvalue_false_node); op1 = c_fully_fold (op1, require_constant_value, &op1_maybe_const); c_inhibit_evaluation_warnings -= (ifexp == truthvalue_false_node); c_inhibit_evaluation_warnings += (ifexp == truthvalue_true_node); op2 = c_fully_fold (op2, require_constant_value, &op2_maybe_const); c_inhibit_evaluation_warnings -= (ifexp == truthvalue_true_node); if (warn_sign_compare) { if ((unsigned_op2 && tree_expr_nonnegative_warnv_p (op1, &ovf)) || (unsigned_op1 && tree_expr_nonnegative_warnv_p (op2, &ovf))) /* OK */; else if (unsigned_op2) warning_at (op1_loc, OPT_Wsign_compare, "operand of ?: changes signedness from " "%qT to %qT due to unsignedness of other " "operand", TREE_TYPE (orig_op1), TREE_TYPE (orig_op2)); else warning_at (op2_loc, OPT_Wsign_compare, "operand of ?: changes signedness from " "%qT to %qT due to unsignedness of other " "operand", TREE_TYPE (orig_op2), TREE_TYPE (orig_op1)); } if (!op1_maybe_const || TREE_CODE (op1) != INTEGER_CST) op1 = c_wrap_maybe_const (op1, !op1_maybe_const); if (!op2_maybe_const || TREE_CODE (op2) != INTEGER_CST) op2 = c_wrap_maybe_const (op2, !op2_maybe_const); } } } } else if (code1 == VOID_TYPE || code2 == VOID_TYPE) { if (code1 != VOID_TYPE || code2 != VOID_TYPE) pedwarn (colon_loc, OPT_Wpedantic, "ISO C forbids conditional expr with only one void side"); result_type = void_type_node; } else if (code1 == POINTER_TYPE && code2 == POINTER_TYPE) { addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (type1)); addr_space_t as2 = TYPE_ADDR_SPACE (TREE_TYPE (type2)); addr_space_t as_common; if (comp_target_types (colon_loc, type1, type2)) result_type = common_pointer_type (type1, type2); else if (null_pointer_constant_p (orig_op1)) result_type = type2; else if (null_pointer_constant_p (orig_op2)) result_type = type1; else if (!addr_space_superset (as1, as2, &as_common)) { error_at (colon_loc, "pointers to disjoint address spaces " "used in conditional expression"); return error_mark_node; } else if (VOID_TYPE_P (TREE_TYPE (type1)) && !TYPE_ATOMIC (TREE_TYPE (type1))) { if ((TREE_CODE (TREE_TYPE (type2)) == ARRAY_TYPE) && (TYPE_QUALS (strip_array_types (TREE_TYPE (type2))) & ~TYPE_QUALS (TREE_TYPE (type1)))) warning_at (colon_loc, OPT_Wdiscarded_array_qualifiers, "pointer to array loses qualifier " "in conditional expression"); if (TREE_CODE (TREE_TYPE (type2)) == FUNCTION_TYPE) pedwarn (colon_loc, OPT_Wpedantic, "ISO C forbids conditional expr between " "%<void *%> and function pointer"); result_type = build_pointer_type (qualify_type (TREE_TYPE (type1), TREE_TYPE (type2))); } else if (VOID_TYPE_P (TREE_TYPE (type2)) && !TYPE_ATOMIC (TREE_TYPE (type2))) { if ((TREE_CODE (TREE_TYPE (type1)) == ARRAY_TYPE) && (TYPE_QUALS 
(strip_array_types (TREE_TYPE (type1))) & ~TYPE_QUALS (TREE_TYPE (type2)))) warning_at (colon_loc, OPT_Wdiscarded_array_qualifiers, "pointer to array loses qualifier " "in conditional expression"); if (TREE_CODE (TREE_TYPE (type1)) == FUNCTION_TYPE) pedwarn (colon_loc, OPT_Wpedantic, "ISO C forbids conditional expr between " "%<void *%> and function pointer"); result_type = build_pointer_type (qualify_type (TREE_TYPE (type2), TREE_TYPE (type1))); } /* Objective-C pointer comparisons are a bit more lenient. */ else if (objc_have_common_type (type1, type2, -3, NULL_TREE)) result_type = objc_common_type (type1, type2); else { int qual = ENCODE_QUAL_ADDR_SPACE (as_common); pedwarn (colon_loc, 0, "pointer type mismatch in conditional expression"); result_type = build_pointer_type (build_qualified_type (void_type_node, qual)); } } else if (code1 == POINTER_TYPE && code2 == INTEGER_TYPE) { if (!null_pointer_constant_p (orig_op2)) pedwarn (colon_loc, 0, "pointer/integer type mismatch in conditional expression"); else { op2 = null_pointer_node; } result_type = type1; } else if (code2 == POINTER_TYPE && code1 == INTEGER_TYPE) { if (!null_pointer_constant_p (orig_op1)) pedwarn (colon_loc, 0, "pointer/integer type mismatch in conditional expression"); else { op1 = null_pointer_node; } result_type = type2; } if (!result_type) { if (flag_cond_mismatch) result_type = void_type_node; else { error_at (colon_loc, "type mismatch in conditional expression"); return error_mark_node; } } /* Merge const and volatile flags of the incoming types. */ result_type = build_type_variant (result_type, TYPE_READONLY (type1) || TYPE_READONLY (type2), TYPE_VOLATILE (type1) || TYPE_VOLATILE (type2)); op1 = ep_convert_and_check (colon_loc, result_type, op1, semantic_result_type); op2 = ep_convert_and_check (colon_loc, result_type, op2, semantic_result_type); if (ifexp_bcp && ifexp == truthvalue_true_node) { op2_int_operands = true; op1 = c_fully_fold (op1, require_constant_value, NULL); } if (ifexp_bcp && ifexp == truthvalue_false_node) { op1_int_operands = true; op2 = c_fully_fold (op2, require_constant_value, NULL); } int_const = int_operands = (ifexp_int_operands && op1_int_operands && op2_int_operands); if (int_operands) { int_const = ((ifexp == truthvalue_true_node && TREE_CODE (orig_op1) == INTEGER_CST && !TREE_OVERFLOW (orig_op1)) || (ifexp == truthvalue_false_node && TREE_CODE (orig_op2) == INTEGER_CST && !TREE_OVERFLOW (orig_op2))); } /* Need to convert condition operand into a vector mask. */ if (VECTOR_TYPE_P (TREE_TYPE (ifexp))) { tree vectype = TREE_TYPE (ifexp); tree elem_type = TREE_TYPE (vectype); tree zero = build_int_cst (elem_type, 0); tree zero_vec = build_vector_from_val (vectype, zero); tree cmp_type = build_same_sized_truth_vector_type (vectype); ifexp = build2 (NE_EXPR, cmp_type, ifexp, zero_vec); } if (int_const || (ifexp_bcp && TREE_CODE (ifexp) == INTEGER_CST)) ret = fold_build3_loc (colon_loc, COND_EXPR, result_type, ifexp, op1, op2); else { if (int_operands) { /* Use c_fully_fold here, since C_MAYBE_CONST_EXPR might be nested inside of the expression. */ op1 = c_fully_fold (op1, false, NULL); op2 = c_fully_fold (op2, false, NULL); } ret = build3 (COND_EXPR, result_type, ifexp, op1, op2); if (int_operands) ret = note_integer_operands (ret); } if (semantic_result_type) ret = build1 (EXCESS_PRECISION_EXPR, semantic_result_type, ret); protected_set_expr_location (ret, colon_loc); /* If the OP1 and OP2 are the same and don't have side-effects, warn here, because the COND_EXPR will be turned into OP1. 
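E.g. x ? y + 1 : y + 1 is diagnosed by -Wduplicated-branches.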
*/ if (warn_duplicated_branches && TREE_CODE (ret) == COND_EXPR && (op1 == op2 || operand_equal_p (op1, op2, 0))) warning_at (EXPR_LOCATION (ret), OPT_Wduplicated_branches, "this condition has identical branches"); return ret; } /* Return a compound expression that performs two expressions and returns the value of the second of them. LOC is the location of the COMPOUND_EXPR. */ tree build_compound_expr (location_t loc, tree expr1, tree expr2) { bool expr1_int_operands, expr2_int_operands; tree eptype = NULL_TREE; tree ret; expr1_int_operands = EXPR_INT_CONST_OPERANDS (expr1); if (expr1_int_operands) expr1 = remove_c_maybe_const_expr (expr1); expr2_int_operands = EXPR_INT_CONST_OPERANDS (expr2); if (expr2_int_operands) expr2 = remove_c_maybe_const_expr (expr2); if (TREE_CODE (expr1) == EXCESS_PRECISION_EXPR) expr1 = TREE_OPERAND (expr1, 0); if (TREE_CODE (expr2) == EXCESS_PRECISION_EXPR) { eptype = TREE_TYPE (expr2); expr2 = TREE_OPERAND (expr2, 0); } if (!TREE_SIDE_EFFECTS (expr1)) { /* The left-hand operand of a comma expression is like an expression statement: with -Wunused, we should warn if it doesn't have any side-effects, unless it was explicitly cast to (void). */ if (warn_unused_value) { if (VOID_TYPE_P (TREE_TYPE (expr1)) && CONVERT_EXPR_P (expr1)) ; /* (void) a, b */ else if (VOID_TYPE_P (TREE_TYPE (expr1)) && TREE_CODE (expr1) == COMPOUND_EXPR && CONVERT_EXPR_P (TREE_OPERAND (expr1, 1))) ; /* (void) a, (void) b, c */ else warning_at (loc, OPT_Wunused_value, "left-hand operand of comma expression has no effect"); } } else if (TREE_CODE (expr1) == COMPOUND_EXPR && warn_unused_value) { tree r = expr1; location_t cloc = loc; while (TREE_CODE (r) == COMPOUND_EXPR) { if (EXPR_HAS_LOCATION (r)) cloc = EXPR_LOCATION (r); r = TREE_OPERAND (r, 1); } if (!TREE_SIDE_EFFECTS (r) && !VOID_TYPE_P (TREE_TYPE (r)) && !CONVERT_EXPR_P (r)) warning_at (cloc, OPT_Wunused_value, "right-hand operand of comma expression has no effect"); } /* With -Wunused, we should also warn if the left-hand operand does have side-effects, but computes a value which is not used. For example, in `foo() + bar(), baz()' the result of the `+' operator is not used, so we should issue a warning. */ else if (warn_unused_value) warn_if_unused_value (expr1, loc); if (expr2 == error_mark_node) return error_mark_node; ret = build2 (COMPOUND_EXPR, TREE_TYPE (expr2), expr1, expr2); if (flag_isoc99 && expr1_int_operands && expr2_int_operands) ret = note_integer_operands (ret); if (eptype) ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret); protected_set_expr_location (ret, loc); return ret; } /* Issue -Wcast-qual warnings when appropriate. TYPE is the type to which we are casting. OTYPE is the type of the expression being cast. Both TYPE and OTYPE are pointer types. LOC is the location of the cast. -Wcast-qual appeared on the command line. Named address space qualifiers are not handled here, because they result in different warnings. */ static void handle_warn_cast_qual (location_t loc, tree type, tree otype) { tree in_type = type; tree in_otype = otype; int added = 0; int discarded = 0; bool is_const; /* Check that the qualifiers on IN_TYPE are a superset of the qualifiers of IN_OTYPE. The outermost level of POINTER_TYPE nodes is uninteresting and we stop as soon as we hit a non-POINTER_TYPE node on either type. */ do { in_otype = TREE_TYPE (in_otype); in_type = TREE_TYPE (in_type); /* GNU C allows cv-qualified function types. 'const' means the function is very pure, 'volatile' means it can't return. 
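In GNU C these correspond to functions declared with the const and noreturn attributes.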
We need to warn when such qualifiers are added, not when they're taken away. */ if (TREE_CODE (in_otype) == FUNCTION_TYPE && TREE_CODE (in_type) == FUNCTION_TYPE) added |= (TYPE_QUALS_NO_ADDR_SPACE (in_type) & ~TYPE_QUALS_NO_ADDR_SPACE (in_otype)); else discarded |= (TYPE_QUALS_NO_ADDR_SPACE (in_otype) & ~TYPE_QUALS_NO_ADDR_SPACE (in_type)); } while (TREE_CODE (in_type) == POINTER_TYPE && TREE_CODE (in_otype) == POINTER_TYPE); if (added) warning_at (loc, OPT_Wcast_qual, "cast adds %q#v qualifier to function type", added); if (discarded) /* There are qualifiers present in IN_OTYPE that are not present in IN_TYPE. */ warning_at (loc, OPT_Wcast_qual, "cast discards %qv qualifier from pointer target type", discarded); if (added || discarded) return; /* A cast from **T to const **T is unsafe, because it can cause a const value to be changed with no additional warning. We only issue this warning if T is the same on both sides, and we only issue the warning if there are the same number of pointers on both sides, as otherwise the cast is clearly unsafe anyhow. A cast is unsafe when a qualifier is added at one level and const is not present at all outer levels. To issue this warning, we check at each level whether the cast adds new qualifiers not already seen. We don't need to special case function types, as they won't have the same TYPE_MAIN_VARIANT. */ if (TYPE_MAIN_VARIANT (in_type) != TYPE_MAIN_VARIANT (in_otype)) return; if (TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE) return; in_type = type; in_otype = otype; is_const = TYPE_READONLY (TREE_TYPE (in_type)); do { in_type = TREE_TYPE (in_type); in_otype = TREE_TYPE (in_otype); if ((TYPE_QUALS (in_type) &~ TYPE_QUALS (in_otype)) != 0 && !is_const) { warning_at (loc, OPT_Wcast_qual, "to be safe all intermediate pointers in cast from " "%qT to %qT must be %<const%> qualified", otype, type); break; } if (is_const) is_const = TYPE_READONLY (in_type); } while (TREE_CODE (in_type) == POINTER_TYPE); } /* Heuristic check if two parameter types can be considered ABI-equivalent. */ static bool c_safe_arg_type_equiv_p (tree t1, tree t2) { t1 = TYPE_MAIN_VARIANT (t1); t2 = TYPE_MAIN_VARIANT (t2); if (TREE_CODE (t1) == POINTER_TYPE && TREE_CODE (t2) == POINTER_TYPE) return true; /* The signedness of the parameter matters only when an integral type smaller than int is promoted to int, otherwise only the precision of the parameter matters. This check should make sure that the callee does not see undefined values in argument registers. */ if (INTEGRAL_TYPE_P (t1) && INTEGRAL_TYPE_P (t2) && TYPE_PRECISION (t1) == TYPE_PRECISION (t2) && (TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2) || !targetm.calls.promote_prototypes (NULL_TREE) || TYPE_PRECISION (t1) >= TYPE_PRECISION (integer_type_node))) return true; return comptypes (t1, t2); } /* Check if a type cast between two function types can be considered safe. */ static bool c_safe_function_type_cast_p (tree t1, tree t2) { if (TREE_TYPE (t1) == void_type_node && TYPE_ARG_TYPES (t1) == void_list_node) return true; if (TREE_TYPE (t2) == void_type_node && TYPE_ARG_TYPES (t2) == void_list_node) return true; if (!c_safe_arg_type_equiv_p (TREE_TYPE (t1), TREE_TYPE (t2))) return false; for (t1 = TYPE_ARG_TYPES (t1), t2 = TYPE_ARG_TYPES (t2); t1 && t2; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2)) if (!c_safe_arg_type_equiv_p (TREE_VALUE (t1), TREE_VALUE (t2))) return false; return true; } /* Build an expression representing a cast to type TYPE of expression EXPR. 
LOC is the location of the cast-- typically the open paren of the cast. */ tree build_c_cast (location_t loc, tree type, tree expr) { tree value; if (TREE_CODE (expr) == EXCESS_PRECISION_EXPR) expr = TREE_OPERAND (expr, 0); value = expr; if (type == error_mark_node || expr == error_mark_node) return error_mark_node; /* The ObjC front-end uses TYPE_MAIN_VARIANT to tie together types differing only in <protocol> qualifications. But when constructing cast expressions, the protocols do matter and must be kept around. */ if (objc_is_object_ptr (type) && objc_is_object_ptr (TREE_TYPE (expr))) return build1 (NOP_EXPR, type, expr); type = TYPE_MAIN_VARIANT (type); if (TREE_CODE (type) == ARRAY_TYPE) { error_at (loc, "cast specifies array type"); return error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "cast specifies function type"); return error_mark_node; } if (!VOID_TYPE_P (type)) { value = require_complete_type (loc, value); if (value == error_mark_node) return error_mark_node; } if (type == TYPE_MAIN_VARIANT (TREE_TYPE (value))) { if (RECORD_OR_UNION_TYPE_P (type)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids casting nonscalar to the same type"); /* Convert to remove any qualifiers from VALUE's type. */ value = convert (type, value); } else if (TREE_CODE (type) == UNION_TYPE) { tree field; for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) if (TREE_TYPE (field) != error_mark_node && comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (field)), TYPE_MAIN_VARIANT (TREE_TYPE (value)))) break; if (field) { tree t; bool maybe_const = true; pedwarn (loc, OPT_Wpedantic, "ISO C forbids casts to union type"); t = c_fully_fold (value, false, &maybe_const); t = build_constructor_single (type, field, t); if (!maybe_const) t = c_wrap_maybe_const (t, true); t = digest_init (loc, type, t, NULL_TREE, false, true, 0); TREE_CONSTANT (t) = TREE_CONSTANT (value); return t; } error_at (loc, "cast to union type from type not present in union"); return error_mark_node; } else { tree otype, ovalue; if (type == void_type_node) { tree t = build1 (CONVERT_EXPR, type, value); SET_EXPR_LOCATION (t, loc); return t; } otype = TREE_TYPE (value); /* Optionally warn about potentially worrisome casts. */ if (warn_cast_qual && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE) handle_warn_cast_qual (loc, type, otype); /* Warn about conversions between pointers to disjoint address spaces. */ if (TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && !null_pointer_constant_p (value)) { addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type)); addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (otype)); addr_space_t as_common; if (!addr_space_superset (as_to, as_from, &as_common)) { if (ADDR_SPACE_GENERIC_P (as_from)) warning_at (loc, 0, "cast to %s address space pointer " "from disjoint generic address space pointer", c_addr_space_name (as_to)); else if (ADDR_SPACE_GENERIC_P (as_to)) warning_at (loc, 0, "cast to generic address space pointer " "from disjoint %s address space pointer", c_addr_space_name (as_from)); else warning_at (loc, 0, "cast to %s address space pointer " "from disjoint %s address space pointer", c_addr_space_name (as_to), c_addr_space_name (as_from)); } } /* Warn about possible alignment problems. 
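E.g. casting char * to int * may yield a pointer with insufficient alignment for int on a strict-alignment target, which -Wcast-align reports.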
*/ if ((STRICT_ALIGNMENT || warn_cast_align == 2) && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (otype)) != VOID_TYPE && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE /* Don't warn about opaque types, where the actual alignment restriction is unknown. */ && !(RECORD_OR_UNION_TYPE_P (TREE_TYPE (otype)) && TYPE_MODE (TREE_TYPE (otype)) == VOIDmode) && min_align_of_type (TREE_TYPE (type)) > min_align_of_type (TREE_TYPE (otype))) warning_at (loc, OPT_Wcast_align, "cast increases required alignment of target type"); if (TREE_CODE (type) == INTEGER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TYPE_PRECISION (type) != TYPE_PRECISION (otype)) /* Unlike conversion of integers to pointers, where the warning is disabled for converting constants because of cases such as SIG_*, warn about converting constant pointers to integers. In some cases it may cause unwanted sign extension, and a warning is appropriate. */ warning_at (loc, OPT_Wpointer_to_int_cast, "cast from pointer to integer of different size"); if (TREE_CODE (value) == CALL_EXPR && TREE_CODE (type) != TREE_CODE (otype)) warning_at (loc, OPT_Wbad_function_cast, "cast from function call of type %qT " "to non-matching type %qT", otype, type); if (TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == INTEGER_TYPE && TYPE_PRECISION (type) != TYPE_PRECISION (otype) /* Don't warn about converting any constant. */ && !TREE_CONSTANT (value)) warning_at (loc, OPT_Wint_to_pointer_cast, "cast to pointer from integer " "of different size"); if (warn_strict_aliasing <= 2) strict_aliasing_warning (EXPR_LOCATION (value), type, expr); /* If pedantic, warn for conversions between function and object pointer types, except for converting a null pointer constant to function pointer type. */ if (pedantic && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (otype)) == FUNCTION_TYPE && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE) pedwarn (loc, OPT_Wpedantic, "ISO C forbids " "conversion of function pointer to object pointer type"); if (pedantic && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE && !null_pointer_constant_p (value)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids " "conversion of object pointer to function pointer type"); if (TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE && TREE_CODE (TREE_TYPE (otype)) == FUNCTION_TYPE && !c_safe_function_type_cast_p (TREE_TYPE (type), TREE_TYPE (otype))) warning_at (loc, OPT_Wcast_function_type, "cast between incompatible function types" " from %qT to %qT", otype, type); ovalue = value; value = convert (type, value); /* Ignore any integer overflow caused by the cast. */ if (TREE_CODE (value) == INTEGER_CST && !FLOAT_TYPE_P (otype)) { if (CONSTANT_CLASS_P (ovalue) && TREE_OVERFLOW (ovalue)) { if (!TREE_OVERFLOW (value)) { /* Avoid clobbering a shared constant. */ value = copy_node (value); TREE_OVERFLOW (value) = TREE_OVERFLOW (ovalue); } } else if (TREE_OVERFLOW (value)) /* Reset VALUE's overflow flags, ensuring constant sharing. */ value = wide_int_to_tree (TREE_TYPE (value), wi::to_wide (value)); } } /* Don't let a cast be an lvalue. 
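ISO C makes the result of a cast a non-lvalue, so e.g. the historical extension (char) c = 'x' must not be accepted; wrapping in non_lvalue below enforces that.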
*/ if (lvalue_p (value)) value = non_lvalue_loc (loc, value); /* Don't allow the results of casting to floating-point or complex types to be confused with actual constants, or casts involving integer and pointer types other than direct integer-to-integer and integer-to-pointer to be confused with integer constant expressions and null pointer constants. */ if (TREE_CODE (value) == REAL_CST || TREE_CODE (value) == COMPLEX_CST || (TREE_CODE (value) == INTEGER_CST && !((TREE_CODE (expr) == INTEGER_CST && INTEGRAL_TYPE_P (TREE_TYPE (expr))) || TREE_CODE (expr) == REAL_CST || TREE_CODE (expr) == COMPLEX_CST))) value = build1 (NOP_EXPR, type, value); protected_set_expr_location (value, loc); return value; } /* Interpret a cast of expression EXPR to type TYPE. LOC is the location of the open paren of the cast, or the position of the cast expr. */ tree c_cast_expr (location_t loc, struct c_type_name *type_name, tree expr) { tree type; tree type_expr = NULL_TREE; bool type_expr_const = true; tree ret; int saved_wsp = warn_strict_prototypes; /* This avoids warnings about unprototyped casts on integers. E.g. "#define SIG_DFL (void(*)())0". */ if (TREE_CODE (expr) == INTEGER_CST) warn_strict_prototypes = 0; type = groktypename (type_name, &type_expr, &type_expr_const); warn_strict_prototypes = saved_wsp; if (TREE_CODE (expr) == ADDR_EXPR && !VOID_TYPE_P (type) && reject_gcc_builtin (expr)) return error_mark_node; ret = build_c_cast (loc, type, expr); if (type_expr) { bool inner_expr_const = true; ret = c_fully_fold (ret, require_constant_value, &inner_expr_const); ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret), type_expr, ret); C_MAYBE_CONST_EXPR_NON_CONST (ret) = !(type_expr_const && inner_expr_const); SET_EXPR_LOCATION (ret, loc); } if (!EXPR_HAS_LOCATION (ret)) protected_set_expr_location (ret, loc); /* C++ does not permit types to be defined in a cast, but it allows references to incomplete types. */ if (warn_cxx_compat && type_name->specs->typespec_kind == ctsk_tagdef) warning_at (loc, OPT_Wc___compat, "defining a type in a cast is invalid in C++"); return ret; } /* Build an assignment expression of lvalue LHS from value RHS. If LHS_ORIGTYPE is not NULL, it is the original type of LHS, which may differ from TREE_TYPE (LHS) for an enum bitfield. MODIFYCODE is the code for a binary operator that we use to combine the old value of LHS with RHS to get the new value. Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment. If RHS_ORIGTYPE is not NULL_TREE, it is the original type of RHS, which may differ from TREE_TYPE (RHS) for an enum value. LOCATION is the location of the MODIFYCODE operator. RHS_LOC is the location of the RHS. */ tree build_modify_expr (location_t location, tree lhs, tree lhs_origtype, enum tree_code modifycode, location_t rhs_loc, tree rhs, tree rhs_origtype) { tree result; tree newrhs; tree rhseval = NULL_TREE; tree lhstype = TREE_TYPE (lhs); tree olhstype = lhstype; bool npc; bool is_atomic_op; /* Types that aren't fully specified cannot be used in assignments. */ lhs = require_complete_type (location, lhs); /* Avoid duplicate error messages from operands that had errors. */ if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK) return error_mark_node; /* Ensure an error for assigning a non-lvalue array to an array in C90. */ if (TREE_CODE (lhstype) == ARRAY_TYPE) { error_at (location, "assignment to expression with array type"); return error_mark_node; } /* For ObjC properties, defer this check.
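Property references are rewritten into setter calls by objc_maybe_build_modify_expr further down, so they need not be lvalues at this point.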
*/ if (!objc_is_property_ref (lhs) && !lvalue_or_else (location, lhs, lv_assign)) return error_mark_node; is_atomic_op = really_atomic_lvalue (lhs); newrhs = rhs; if (TREE_CODE (lhs) == C_MAYBE_CONST_EXPR) { tree inner = build_modify_expr (location, C_MAYBE_CONST_EXPR_EXPR (lhs), lhs_origtype, modifycode, rhs_loc, rhs, rhs_origtype); if (inner == error_mark_node) return error_mark_node; result = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner), C_MAYBE_CONST_EXPR_PRE (lhs), inner); gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (lhs)); C_MAYBE_CONST_EXPR_NON_CONST (result) = 1; protected_set_expr_location (result, location); return result; } /* If a binary op has been requested, combine the old LHS value with the RHS producing the value we should actually store into the LHS. */ if (modifycode != NOP_EXPR) { lhs = c_fully_fold (lhs, false, NULL, true); lhs = stabilize_reference (lhs); /* Construct the RHS for any non-atomic compound assignment. */ if (!is_atomic_op) { /* If in LHS op= RHS the RHS has side-effects, ensure they are preevaluated before the rest of the assignment expression's side-effects, because RHS could contain e.g. function calls that modify LHS. */ if (TREE_SIDE_EFFECTS (rhs)) { if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR) newrhs = save_expr (TREE_OPERAND (rhs, 0)); else newrhs = save_expr (rhs); rhseval = newrhs; if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR) newrhs = build1 (EXCESS_PRECISION_EXPR, TREE_TYPE (rhs), newrhs); } newrhs = build_binary_op (location, modifycode, lhs, newrhs, true); /* The original type of the right hand side is no longer meaningful. */ rhs_origtype = NULL_TREE; } } if (c_dialect_objc ()) { /* Check if we are modifying an Objective-C property reference; if so, we need to generate setter calls. */ if (TREE_CODE (newrhs) == EXCESS_PRECISION_EXPR) result = objc_maybe_build_modify_expr (lhs, TREE_OPERAND (newrhs, 0)); else result = objc_maybe_build_modify_expr (lhs, newrhs); if (result) goto return_result; /* Else, do the check that we postponed for Objective-C. */ if (!lvalue_or_else (location, lhs, lv_assign)) return error_mark_node; } /* Give an error for storing in something that is 'const'. */ if (TYPE_READONLY (lhstype) || (RECORD_OR_UNION_TYPE_P (lhstype) && C_TYPE_FIELDS_READONLY (lhstype))) { readonly_error (location, lhs, lv_assign); return error_mark_node; } else if (TREE_READONLY (lhs)) readonly_warning (lhs, lv_assign); /* If storing into a structure or union member, it has probably been given type `int'. Compute the type that would go with the actual amount of storage the member occupies. */ if (TREE_CODE (lhs) == COMPONENT_REF && (TREE_CODE (lhstype) == INTEGER_TYPE || TREE_CODE (lhstype) == BOOLEAN_TYPE || TREE_CODE (lhstype) == REAL_TYPE || TREE_CODE (lhstype) == ENUMERAL_TYPE)) lhstype = TREE_TYPE (get_unwidened (lhs, 0)); /* If storing in a field whose type is in actuality a short or something narrower than int, we must store in the field in its actual type. */ if (lhstype != TREE_TYPE (lhs)) { lhs = copy_node (lhs); TREE_TYPE (lhs) = lhstype; } /* Issue -Wc++-compat warnings about an assignment to an enum type when LHS does not have its original type. This happens for, e.g., an enum bitfield in a struct. */ if (warn_cxx_compat && lhs_origtype != NULL_TREE && lhs_origtype != lhstype && TREE_CODE (lhs_origtype) == ENUMERAL_TYPE) { tree checktype = (rhs_origtype != NULL_TREE ?
rhs_origtype : TREE_TYPE (rhs)); if (checktype != error_mark_node && (TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (lhs_origtype) || (is_atomic_op && modifycode != NOP_EXPR))) warning_at (location, OPT_Wc___compat, "enum conversion in assignment is invalid in C++"); } /* If the lhs is atomic, remove that qualifier. */ if (is_atomic_op) { lhstype = build_qualified_type (lhstype, (TYPE_QUALS (lhstype) & ~TYPE_QUAL_ATOMIC)); olhstype = build_qualified_type (olhstype, (TYPE_QUALS (lhstype) & ~TYPE_QUAL_ATOMIC)); } /* Convert new value to destination type. Fold it first, then restore any excess precision information, for the sake of conversion warnings. */ if (!(is_atomic_op && modifycode != NOP_EXPR)) { tree rhs_semantic_type = NULL_TREE; if (TREE_CODE (newrhs) == EXCESS_PRECISION_EXPR) { rhs_semantic_type = TREE_TYPE (newrhs); newrhs = TREE_OPERAND (newrhs, 0); } npc = null_pointer_constant_p (newrhs); newrhs = c_fully_fold (newrhs, false, NULL); if (rhs_semantic_type) newrhs = build1 (EXCESS_PRECISION_EXPR, rhs_semantic_type, newrhs); newrhs = convert_for_assignment (location, rhs_loc, lhstype, newrhs, rhs_origtype, ic_assign, npc, NULL_TREE, NULL_TREE, 0); if (TREE_CODE (newrhs) == ERROR_MARK) return error_mark_node; } /* Emit ObjC write barrier, if necessary. */ if (c_dialect_objc () && flag_objc_gc) { result = objc_generate_write_barrier (lhs, modifycode, newrhs); if (result) { protected_set_expr_location (result, location); goto return_result; } } /* Scan operands. */ if (is_atomic_op) result = build_atomic_assign (location, lhs, modifycode, newrhs, false); else { result = build2 (MODIFY_EXPR, lhstype, lhs, newrhs); TREE_SIDE_EFFECTS (result) = 1; protected_set_expr_location (result, location); } /* If we got the LHS in a different type for storing in, convert the result back to the nominal type of LHS so that the value we return always has the same type as the LHS argument. */ if (olhstype == TREE_TYPE (result)) goto return_result; result = convert_for_assignment (location, rhs_loc, olhstype, result, rhs_origtype, ic_assign, false, NULL_TREE, NULL_TREE, 0); protected_set_expr_location (result, location); return_result: if (rhseval) result = build2 (COMPOUND_EXPR, TREE_TYPE (result), rhseval, result); return result; } /* Return whether STRUCT_TYPE has an anonymous field with type TYPE. This is used to implement -fplan9-extensions. */ static bool find_anonymous_field_with_type (tree struct_type, tree type) { tree field; bool found; gcc_assert (RECORD_OR_UNION_TYPE_P (struct_type)); found = false; for (field = TYPE_FIELDS (struct_type); field != NULL_TREE; field = TREE_CHAIN (field)) { tree fieldtype = (TYPE_ATOMIC (TREE_TYPE (field)) ? c_build_qualified_type (TREE_TYPE (field), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (TREE_TYPE (field))); if (DECL_NAME (field) == NULL && comptypes (type, fieldtype)) { if (found) return false; found = true; } else if (DECL_NAME (field) == NULL && RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)) && find_anonymous_field_with_type (TREE_TYPE (field), type)) { if (found) return false; found = true; } } return found; } /* RHS is an expression whose type is pointer to struct. If there is an anonymous field in RHS with type TYPE, then return a pointer to that field in RHS. This is used with -fplan9-extensions. This returns NULL if no conversion could be found. 
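   For example (an illustrative sketch, not code from this file),
   with -fplan9-extensions:

       struct base { int x; };
       struct derived { struct base; int y; };   (anonymous field)
       struct derived d;
       struct base *bp = &d;   (becomes the address of the anonymous
                                'struct base' inside 'd')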
*/ static tree convert_to_anonymous_field (location_t location, tree type, tree rhs) { tree rhs_struct_type, lhs_main_type; tree field, found_field; bool found_sub_field; tree ret; gcc_assert (POINTER_TYPE_P (TREE_TYPE (rhs))); rhs_struct_type = TREE_TYPE (TREE_TYPE (rhs)); gcc_assert (RECORD_OR_UNION_TYPE_P (rhs_struct_type)); gcc_assert (POINTER_TYPE_P (type)); lhs_main_type = (TYPE_ATOMIC (TREE_TYPE (type)) ? c_build_qualified_type (TREE_TYPE (type), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (TREE_TYPE (type))); found_field = NULL_TREE; found_sub_field = false; for (field = TYPE_FIELDS (rhs_struct_type); field != NULL_TREE; field = TREE_CHAIN (field)) { if (DECL_NAME (field) != NULL_TREE || !RECORD_OR_UNION_TYPE_P (TREE_TYPE (field))) continue; tree fieldtype = (TYPE_ATOMIC (TREE_TYPE (field)) ? c_build_qualified_type (TREE_TYPE (field), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (TREE_TYPE (field))); if (comptypes (lhs_main_type, fieldtype)) { if (found_field != NULL_TREE) return NULL_TREE; found_field = field; } else if (find_anonymous_field_with_type (TREE_TYPE (field), lhs_main_type)) { if (found_field != NULL_TREE) return NULL_TREE; found_field = field; found_sub_field = true; } } if (found_field == NULL_TREE) return NULL_TREE; ret = fold_build3_loc (location, COMPONENT_REF, TREE_TYPE (found_field), build_fold_indirect_ref (rhs), found_field, NULL_TREE); ret = build_fold_addr_expr_loc (location, ret); if (found_sub_field) { ret = convert_to_anonymous_field (location, type, ret); gcc_assert (ret != NULL_TREE); } return ret; } /* Issue an error message for a bad initializer component. GMSGID identifies the message. The component name is taken from the spelling stack. */ static void error_init (location_t loc, const char *gmsgid) { char *ofwhat; /* The gmsgid may be a format string with %< and %>. */ error_at (loc, gmsgid); ofwhat = print_spelling ((char *) alloca (spelling_length () + 1)); if (*ofwhat) inform (loc, "(near initialization for %qs)", ofwhat); } /* Issue a pedantic warning for a bad initializer component. OPT is the option OPT_* (from options.h) controlling this warning or 0 if it is unconditionally given. GMSGID identifies the message. The component name is taken from the spelling stack. */ static void ATTRIBUTE_GCC_DIAG (3,0) pedwarn_init (location_t loc, int opt, const char *gmsgid, ...) { /* Use the location where a macro was expanded rather than where it was defined to make sure macros defined in system headers but used incorrectly elsewhere are diagnosed. */ source_location exploc = expansion_point_location_if_in_system_header (loc); va_list ap; va_start (ap, gmsgid); bool warned = emit_diagnostic_valist (DK_PEDWARN, exploc, opt, gmsgid, &ap); va_end (ap); char *ofwhat = print_spelling ((char *) alloca (spelling_length () + 1)); if (*ofwhat && warned) inform (exploc, "(near initialization for %qs)", ofwhat); } /* Issue a warning for a bad initializer component. OPT is the OPT_W* value corresponding to the warning option that controls this warning. GMSGID identifies the message. The component name is taken from the spelling stack. */ static void warning_init (location_t loc, int opt, const char *gmsgid) { char *ofwhat; bool warned; /* Use the location where a macro was expanded rather than where it was defined to make sure macros defined in system headers but used incorrectly elsewhere are diagnosed. */ source_location exploc = expansion_point_location_if_in_system_header (loc); /* The gmsgid may be a format string with %< and %>. 
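   (For instance, %<void *%> in a message such as "ISO C forbids
   assignment between function pointer and %<void *%>" is rendered
   with locale-appropriate quote characters by the diagnostic
   machinery.)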
*/
  warned = warning_at (exploc, opt, gmsgid);
  ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
  if (*ofwhat && warned)
    inform (exploc, "(near initialization for %qs)", ofwhat);
}

/* If TYPE is an array type and EXPR is a parenthesized string
   constant, warn if pedantic that EXPR is being used to initialize an
   object of type TYPE.  */

void
maybe_warn_string_init (location_t loc, tree type, struct c_expr expr)
{
  if (pedantic
      && TREE_CODE (type) == ARRAY_TYPE
      && TREE_CODE (expr.value) == STRING_CST
      && expr.original_code != STRING_CST)
    pedwarn_init (loc, OPT_Wpedantic,
                  "array initialized from parenthesized string constant");
}

/* Attempt to locate the parameter with the given index within FNDECL,
   returning DECL_SOURCE_LOCATION (FNDECL) if it can't be found.  */

static location_t
get_fndecl_argument_location (tree fndecl, int argnum)
{
  int i;
  tree param;

  /* Locate param by index within DECL_ARGUMENTS (fndecl).  */
  for (i = 0, param = DECL_ARGUMENTS (fndecl);
       i < argnum && param;
       i++, param = TREE_CHAIN (param))
    ;

  /* If something went wrong (e.g. if we have a builtin and thus no
     arguments), return DECL_SOURCE_LOCATION (FNDECL).  */
  if (param == NULL)
    return DECL_SOURCE_LOCATION (fndecl);

  return DECL_SOURCE_LOCATION (param);
}

/* Issue a note about a mismatching argument for parameter PARMNUM
   to FUNDECL, for types EXPECTED_TYPE and ACTUAL_TYPE.
   Attempt to issue the note at the pertinent parameter of the decl;
   failing that issue it at the location of FUNDECL; failing that
   issue it at PLOC.  */

static void
inform_for_arg (tree fundecl, location_t ploc, int parmnum,
                tree expected_type, tree actual_type)
{
  location_t loc;
  if (fundecl && !DECL_IS_BUILTIN (fundecl))
    loc = get_fndecl_argument_location (fundecl, parmnum - 1);
  else
    loc = ploc;

  inform (loc,
          "expected %qT but argument is of type %qT",
          expected_type, actual_type);
}

/* Convert value RHS to type TYPE as preparation for an assignment to
   an lvalue of type TYPE.  If ORIGTYPE is not NULL_TREE, it is the
   original type of RHS; this differs from TREE_TYPE (RHS) for enum
   types.  NULL_POINTER_CONSTANT says whether RHS was a null pointer
   constant before any folding.
   The real work of conversion is done by `convert'.
   The purpose of this function is to generate error messages
   for assignments that are not allowed in C.
   ERRTYPE says whether it is argument passing, assignment,
   initialization or return.

   In the following example, '~' denotes where EXPR_LOC and '^' where
   LOCATION point to:

     f (var);      [ic_argpass]
        ^  ~~~
     x = var;      [ic_assign]
       ^  ~~~;
     int x = var;  [ic_init]
             ^^^
     return x;     [ic_return]
       ^

   FUNCTION is a tree for the function being called.
   PARMNUM is the number of the argument, for printing in error
   messages.  */

static tree
convert_for_assignment (location_t location, location_t expr_loc, tree type,
                        tree rhs, tree origtype, enum impl_conv errtype,
                        bool null_pointer_constant, tree fundecl,
                        tree function, int parmnum)
{
  enum tree_code codel = TREE_CODE (type);
  tree orig_rhs = rhs;
  tree rhstype;
  enum tree_code coder;
  tree rname = NULL_TREE;
  bool objc_ok = false;

  /* Use the expansion point location to handle cases such as user's
     function returning a wrong-type macro defined in a system
     header.  */
  location = expansion_point_location_if_in_system_header (location);

  if (errtype == ic_argpass)
    {
      tree selector;
      /* Change pointer to function to the function itself for
         diagnostics.
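         For a call such as f (x) the callee reaches this point as &f,
         an ADDR_EXPR of the FUNCTION_DECL; stripping the ADDR_EXPR
         below lets diagnostics say "passing argument 1 of 'f'"
         instead of naming the address expression.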
*/ if (TREE_CODE (function) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL) function = TREE_OPERAND (function, 0); /* Handle an ObjC selector specially for diagnostics. */ selector = objc_message_selector (); rname = function; if (selector && parmnum > 2) { rname = selector; parmnum -= 2; } } /* This macro is used to emit diagnostics to ensure that all format strings are complete sentences, visible to gettext and checked at compile time. */ #define PEDWARN_FOR_ASSIGNMENT(LOCATION, PLOC, OPT, AR, AS, IN, RE) \ do { \ switch (errtype) \ { \ case ic_argpass: \ if (pedwarn (PLOC, OPT, AR, parmnum, rname)) \ inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \ break; \ case ic_assign: \ pedwarn (LOCATION, OPT, AS); \ break; \ case ic_init: \ pedwarn_init (LOCATION, OPT, IN); \ break; \ case ic_return: \ pedwarn (LOCATION, OPT, RE); \ break; \ default: \ gcc_unreachable (); \ } \ } while (0) /* This macro is used to emit diagnostics to ensure that all format strings are complete sentences, visible to gettext and checked at compile time. It is the same as PEDWARN_FOR_ASSIGNMENT but with an extra parameter to enumerate qualifiers. */ #define PEDWARN_FOR_QUALIFIERS(LOCATION, PLOC, OPT, AR, AS, IN, RE, QUALS) \ do { \ switch (errtype) \ { \ case ic_argpass: \ if (pedwarn (PLOC, OPT, AR, parmnum, rname, QUALS)) \ inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \ break; \ case ic_assign: \ pedwarn (LOCATION, OPT, AS, QUALS); \ break; \ case ic_init: \ pedwarn (LOCATION, OPT, IN, QUALS); \ break; \ case ic_return: \ pedwarn (LOCATION, OPT, RE, QUALS); \ break; \ default: \ gcc_unreachable (); \ } \ } while (0) /* This macro is used to emit diagnostics to ensure that all format strings are complete sentences, visible to gettext and checked at compile time. It is the same as PEDWARN_FOR_QUALIFIERS but uses warning_at instead of pedwarn. */ #define WARNING_FOR_QUALIFIERS(LOCATION, PLOC, OPT, AR, AS, IN, RE, QUALS) \ do { \ switch (errtype) \ { \ case ic_argpass: \ if (warning_at (PLOC, OPT, AR, parmnum, rname, QUALS)) \ inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \ break; \ case ic_assign: \ warning_at (LOCATION, OPT, AS, QUALS); \ break; \ case ic_init: \ warning_at (LOCATION, OPT, IN, QUALS); \ break; \ case ic_return: \ warning_at (LOCATION, OPT, RE, QUALS); \ break; \ default: \ gcc_unreachable (); \ } \ } while (0) if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR) rhs = TREE_OPERAND (rhs, 0); rhstype = TREE_TYPE (rhs); coder = TREE_CODE (rhstype); if (coder == ERROR_MARK) return error_mark_node; if (c_dialect_objc ()) { int parmno; switch (errtype) { case ic_return: parmno = 0; break; case ic_assign: parmno = -1; break; case ic_init: parmno = -2; break; default: parmno = parmnum; break; } objc_ok = objc_compare_types (type, rhstype, parmno, rname); } if (warn_cxx_compat) { tree checktype = origtype != NULL_TREE ? origtype : rhstype; if (checktype != error_mark_node && TREE_CODE (type) == ENUMERAL_TYPE && TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (type)) switch (errtype) { case ic_argpass: if (pedwarn (expr_loc, OPT_Wc___compat, "enum conversion when " "passing argument %d of %qE is invalid in C++", parmnum, rname)) inform ((fundecl && !DECL_IS_BUILTIN (fundecl)) ? 
DECL_SOURCE_LOCATION (fundecl) : expr_loc, "expected %qT but argument is of type %qT", type, rhstype); break; case ic_assign: pedwarn (location, OPT_Wc___compat, "enum conversion from %qT to " "%qT in assignment is invalid in C++", rhstype, type); break; case ic_init: pedwarn_init (location, OPT_Wc___compat, "enum conversion from " "%qT to %qT in initialization is invalid in C++", rhstype, type); break; case ic_return: pedwarn (location, OPT_Wc___compat, "enum conversion from %qT to " "%qT in return is invalid in C++", rhstype, type); break; default: gcc_unreachable (); } } if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype)) return rhs; if (coder == VOID_TYPE) { /* Except for passing an argument to an unprototyped function, this is a constraint violation. When passing an argument to an unprototyped function, it is compile-time undefined; making it a constraint in that case was rejected in DR#252. */ error_at (location, "void value not ignored as it ought to be"); return error_mark_node; } rhs = require_complete_type (location, rhs); if (rhs == error_mark_node) return error_mark_node; if (coder == POINTER_TYPE && reject_gcc_builtin (rhs)) return error_mark_node; /* A non-reference type can convert to a reference. This handles va_start, va_copy and possibly port built-ins. */ if (codel == REFERENCE_TYPE && coder != REFERENCE_TYPE) { if (!lvalue_p (rhs)) { error_at (location, "cannot pass rvalue to reference parameter"); return error_mark_node; } if (!c_mark_addressable (rhs)) return error_mark_node; rhs = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (rhs)), rhs); SET_EXPR_LOCATION (rhs, location); rhs = convert_for_assignment (location, expr_loc, build_pointer_type (TREE_TYPE (type)), rhs, origtype, errtype, null_pointer_constant, fundecl, function, parmnum); if (rhs == error_mark_node) return error_mark_node; rhs = build1 (NOP_EXPR, type, rhs); SET_EXPR_LOCATION (rhs, location); return rhs; } /* Some types can interconvert without explicit casts. */ else if (codel == VECTOR_TYPE && coder == VECTOR_TYPE && vector_types_convertible_p (type, TREE_TYPE (rhs), true)) return convert (type, rhs); /* Arithmetic types all interconvert, and enum is treated like int. */ else if ((codel == INTEGER_TYPE || codel == REAL_TYPE || codel == FIXED_POINT_TYPE || codel == ENUMERAL_TYPE || codel == COMPLEX_TYPE || codel == BOOLEAN_TYPE) && (coder == INTEGER_TYPE || coder == REAL_TYPE || coder == FIXED_POINT_TYPE || coder == ENUMERAL_TYPE || coder == COMPLEX_TYPE || coder == BOOLEAN_TYPE)) { tree ret; bool save = in_late_binary_op; if (codel == BOOLEAN_TYPE || codel == COMPLEX_TYPE || (coder == REAL_TYPE && (codel == INTEGER_TYPE || codel == ENUMERAL_TYPE) && sanitize_flags_p (SANITIZE_FLOAT_CAST))) in_late_binary_op = true; ret = convert_and_check (expr_loc != UNKNOWN_LOCATION ? expr_loc : location, type, orig_rhs); in_late_binary_op = save; return ret; } /* Aggregates in different TUs might need conversion. */ if ((codel == RECORD_TYPE || codel == UNION_TYPE) && codel == coder && comptypes (type, rhstype)) return convert_and_check (expr_loc != UNKNOWN_LOCATION ? expr_loc : location, type, rhs); /* Conversion to a transparent union or record from its member types. This applies only to function arguments. 
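   An illustrative sketch of the GNU extension this implements (the
   names here are hypothetical):

       typedef union { int *ip; const int *cip; } arg_t
         __attribute__ ((__transparent_union__));
       void f (arg_t);
       const int c = 0;
       f (&c);    (the matching 'const int *' member receives &c,
                   with no cast and no warning)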
*/ if (((codel == UNION_TYPE || codel == RECORD_TYPE) && TYPE_TRANSPARENT_AGGR (type)) && errtype == ic_argpass) { tree memb, marginal_memb = NULL_TREE; for (memb = TYPE_FIELDS (type); memb ; memb = DECL_CHAIN (memb)) { tree memb_type = TREE_TYPE (memb); if (comptypes (TYPE_MAIN_VARIANT (memb_type), TYPE_MAIN_VARIANT (rhstype))) break; if (TREE_CODE (memb_type) != POINTER_TYPE) continue; if (coder == POINTER_TYPE) { tree ttl = TREE_TYPE (memb_type); tree ttr = TREE_TYPE (rhstype); /* Any non-function converts to a [const][volatile] void * and vice versa; otherwise, targets must be the same. Meanwhile, the lhs target must have all the qualifiers of the rhs. */ if ((VOID_TYPE_P (ttl) && !TYPE_ATOMIC (ttl)) || (VOID_TYPE_P (ttr) && !TYPE_ATOMIC (ttr)) || comp_target_types (location, memb_type, rhstype)) { int lquals = TYPE_QUALS (ttl) & ~TYPE_QUAL_ATOMIC; int rquals = TYPE_QUALS (ttr) & ~TYPE_QUAL_ATOMIC; /* If this type won't generate any warnings, use it. */ if (lquals == rquals || ((TREE_CODE (ttr) == FUNCTION_TYPE && TREE_CODE (ttl) == FUNCTION_TYPE) ? ((lquals | rquals) == rquals) : ((lquals | rquals) == lquals))) break; /* Keep looking for a better type, but remember this one. */ if (!marginal_memb) marginal_memb = memb; } } /* Can convert integer zero to any pointer type. */ if (null_pointer_constant) { rhs = null_pointer_node; break; } } if (memb || marginal_memb) { if (!memb) { /* We have only a marginally acceptable member type; it needs a warning. */ tree ttl = TREE_TYPE (TREE_TYPE (marginal_memb)); tree ttr = TREE_TYPE (rhstype); /* Const and volatile mean something different for function types, so the usual warnings are not appropriate. */ if (TREE_CODE (ttr) == FUNCTION_TYPE && TREE_CODE (ttl) == FUNCTION_TYPE) { /* Because const and volatile on functions are restrictions that say the function will not do certain things, it is okay to use a const or volatile function where an ordinary one is wanted, but not vice-versa. */ if (TYPE_QUALS_NO_ADDR_SPACE (ttl) & ~TYPE_QUALS_NO_ADDR_SPACE (ttr)) PEDWARN_FOR_QUALIFIERS (location, expr_loc, OPT_Wdiscarded_qualifiers, G_("passing argument %d of %qE " "makes %q#v qualified function " "pointer from unqualified"), G_("assignment makes %q#v qualified " "function pointer from " "unqualified"), G_("initialization makes %q#v qualified " "function pointer from " "unqualified"), G_("return makes %q#v qualified function " "pointer from unqualified"), TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr)); } else if (TYPE_QUALS_NO_ADDR_SPACE (ttr) & ~TYPE_QUALS_NO_ADDR_SPACE (ttl)) PEDWARN_FOR_QUALIFIERS (location, expr_loc, OPT_Wdiscarded_qualifiers, G_("passing argument %d of %qE discards " "%qv qualifier from pointer target type"), G_("assignment discards %qv qualifier " "from pointer target type"), G_("initialization discards %qv qualifier " "from pointer target type"), G_("return discards %qv qualifier from " "pointer target type"), TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl)); memb = marginal_memb; } if (!fundecl || !DECL_IN_SYSTEM_HEADER (fundecl)) pedwarn (location, OPT_Wpedantic, "ISO C prohibits argument conversion to union type"); rhs = fold_convert_loc (location, TREE_TYPE (memb), rhs); return build_constructor_single (type, memb, rhs); } } /* Conversions among pointers */ else if ((codel == POINTER_TYPE || codel == REFERENCE_TYPE) && (coder == codel)) { tree ttl = TREE_TYPE (type); tree ttr = TREE_TYPE (rhstype); tree mvl = ttl; tree mvr = ttr; bool is_opaque_pointer; int target_cmp = 0; /* Cache comp_target_types () result. 
*/ addr_space_t asl; addr_space_t asr; if (TREE_CODE (mvl) != ARRAY_TYPE) mvl = (TYPE_ATOMIC (mvl) ? c_build_qualified_type (TYPE_MAIN_VARIANT (mvl), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (mvl)); if (TREE_CODE (mvr) != ARRAY_TYPE) mvr = (TYPE_ATOMIC (mvr) ? c_build_qualified_type (TYPE_MAIN_VARIANT (mvr), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (mvr)); /* Opaque pointers are treated like void pointers. */ is_opaque_pointer = vector_targets_convertible_p (ttl, ttr); /* The Plan 9 compiler permits a pointer to a struct to be automatically converted into a pointer to an anonymous field within the struct. */ if (flag_plan9_extensions && RECORD_OR_UNION_TYPE_P (mvl) && RECORD_OR_UNION_TYPE_P (mvr) && mvl != mvr) { tree new_rhs = convert_to_anonymous_field (location, type, rhs); if (new_rhs != NULL_TREE) { rhs = new_rhs; rhstype = TREE_TYPE (rhs); coder = TREE_CODE (rhstype); ttr = TREE_TYPE (rhstype); mvr = TYPE_MAIN_VARIANT (ttr); } } /* C++ does not allow the implicit conversion void* -> T*. However, for the purpose of reducing the number of false positives, we tolerate the special case of int *p = NULL; where NULL is typically defined in C to be '(void *) 0'. */ if (VOID_TYPE_P (ttr) && rhs != null_pointer_node && !VOID_TYPE_P (ttl)) warning_at (errtype == ic_argpass ? expr_loc : location, OPT_Wc___compat, "request for implicit conversion " "from %qT to %qT not permitted in C++", rhstype, type); /* See if the pointers point to incompatible address spaces. */ asl = TYPE_ADDR_SPACE (ttl); asr = TYPE_ADDR_SPACE (ttr); if (!null_pointer_constant_p (rhs) && asr != asl && !targetm.addr_space.subset_p (asr, asl)) { switch (errtype) { case ic_argpass: error_at (expr_loc, "passing argument %d of %qE from pointer to " "non-enclosed address space", parmnum, rname); break; case ic_assign: error_at (location, "assignment from pointer to " "non-enclosed address space"); break; case ic_init: error_at (location, "initialization from pointer to " "non-enclosed address space"); break; case ic_return: error_at (location, "return from pointer to " "non-enclosed address space"); break; default: gcc_unreachable (); } return error_mark_node; } /* Check if the right-hand side has a format attribute but the left-hand side doesn't. */ if (warn_suggest_attribute_format && check_missing_format_attribute (type, rhstype)) { switch (errtype) { case ic_argpass: warning_at (expr_loc, OPT_Wsuggest_attribute_format, "argument %d of %qE might be " "a candidate for a format attribute", parmnum, rname); break; case ic_assign: warning_at (location, OPT_Wsuggest_attribute_format, "assignment left-hand side might be " "a candidate for a format attribute"); break; case ic_init: warning_at (location, OPT_Wsuggest_attribute_format, "initialization left-hand side might be " "a candidate for a format attribute"); break; case ic_return: warning_at (location, OPT_Wsuggest_attribute_format, "return type might be " "a candidate for a format attribute"); break; default: gcc_unreachable (); } } /* Any non-function converts to a [const][volatile] void * and vice versa; otherwise, targets must be the same. Meanwhile, the lhs target must have all the qualifiers of the rhs. 
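   Concretely (illustrative only):

       char *cp; const char *ccp; void *vp;
       vp = cp;     (OK: any object pointer converts to void *)
       ccp = cp;    (OK: the lhs target gains a qualifier)
       cp = ccp;    (warns: discards 'const' from the pointer
                     target type)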
*/
      if ((VOID_TYPE_P (ttl) && !TYPE_ATOMIC (ttl))
          || (VOID_TYPE_P (ttr) && !TYPE_ATOMIC (ttr))
          || (target_cmp = comp_target_types (location, type, rhstype))
          || is_opaque_pointer
          || ((c_common_unsigned_type (mvl)
               == c_common_unsigned_type (mvr))
              && (c_common_signed_type (mvl)
                  == c_common_signed_type (mvr))
              && TYPE_ATOMIC (mvl) == TYPE_ATOMIC (mvr)))
        {
          /* Warn about loss of qualifiers from pointers to arrays with
             qualifiers on the element type.  */
          if (TREE_CODE (ttr) == ARRAY_TYPE)
            {
              ttr = strip_array_types (ttr);
              ttl = strip_array_types (ttl);

              if (TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttr)
                  & ~TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttl))
                WARNING_FOR_QUALIFIERS (location, expr_loc,
                                        OPT_Wdiscarded_array_qualifiers,
                                        G_("passing argument %d of %qE discards "
                                           "%qv qualifier from pointer target type"),
                                        G_("assignment discards %qv qualifier "
                                           "from pointer target type"),
                                        G_("initialization discards %qv qualifier "
                                           "from pointer target type"),
                                        G_("return discards %qv qualifier from "
                                           "pointer target type"),
                                        TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
            }
          else if (pedantic
                   && ((VOID_TYPE_P (ttl) && TREE_CODE (ttr) == FUNCTION_TYPE)
                       || (VOID_TYPE_P (ttr)
                           && !null_pointer_constant
                           && TREE_CODE (ttl) == FUNCTION_TYPE)))
            PEDWARN_FOR_ASSIGNMENT (location, expr_loc, OPT_Wpedantic,
                                    G_("ISO C forbids passing argument %d of "
                                       "%qE between function pointer "
                                       "and %<void *%>"),
                                    G_("ISO C forbids assignment between "
                                       "function pointer and %<void *%>"),
                                    G_("ISO C forbids initialization between "
                                       "function pointer and %<void *%>"),
                                    G_("ISO C forbids return between function "
                                       "pointer and %<void *%>"));
          /* Const and volatile mean something different for function
             types, so the usual warnings are not appropriate.  */
          else if (TREE_CODE (ttr) != FUNCTION_TYPE
                   && TREE_CODE (ttl) != FUNCTION_TYPE)
            {
              /* Don't warn about loss of qualifier for conversions from
                 qualified void* to pointers to arrays with corresponding
                 qualifier on the element type.  */
              if (!pedantic)
                ttl = strip_array_types (ttl);

              /* Assignments between atomic and non-atomic objects are
                 OK.  */
              if (TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttr)
                  & ~TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttl))
                {
                  PEDWARN_FOR_QUALIFIERS (location, expr_loc,
                                          OPT_Wdiscarded_qualifiers,
                                          G_("passing argument %d of %qE discards "
                                             "%qv qualifier from pointer target type"),
                                          G_("assignment discards %qv qualifier "
                                             "from pointer target type"),
                                          G_("initialization discards %qv qualifier "
                                             "from pointer target type"),
                                          G_("return discards %qv qualifier from "
                                             "pointer target type"),
                                          TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
                }
              /* If this is not a case of ignoring a mismatch in
                 signedness, no warning.  */
              else if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr) || target_cmp)
                ;
              /* If there is a mismatch, do warn.  */
              else if (warn_pointer_sign)
                switch (errtype)
                  {
                  case ic_argpass:
                    if (pedwarn (expr_loc, OPT_Wpointer_sign,
                                 "pointer targets in passing argument %d of "
                                 "%qE differ in signedness", parmnum, rname))
                      inform ((fundecl && !DECL_IS_BUILTIN (fundecl)) ?
DECL_SOURCE_LOCATION (fundecl) : expr_loc, "expected %qT but argument is of type %qT", type, rhstype); break; case ic_assign: pedwarn (location, OPT_Wpointer_sign, "pointer targets in assignment from %qT to %qT " "differ in signedness", rhstype, type); break; case ic_init: pedwarn_init (location, OPT_Wpointer_sign, "pointer targets in initialization of %qT " "from %qT differ in signedness", type, rhstype); break; case ic_return: pedwarn (location, OPT_Wpointer_sign, "pointer targets in " "returning %qT from a function with return type " "%qT differ in signedness", rhstype, type); break; default: gcc_unreachable (); } } else if (TREE_CODE (ttl) == FUNCTION_TYPE && TREE_CODE (ttr) == FUNCTION_TYPE) { /* Because const and volatile on functions are restrictions that say the function will not do certain things, it is okay to use a const or volatile function where an ordinary one is wanted, but not vice-versa. */ if (TYPE_QUALS_NO_ADDR_SPACE (ttl) & ~TYPE_QUALS_NO_ADDR_SPACE (ttr)) PEDWARN_FOR_QUALIFIERS (location, expr_loc, OPT_Wdiscarded_qualifiers, G_("passing argument %d of %qE makes " "%q#v qualified function pointer " "from unqualified"), G_("assignment makes %q#v qualified function " "pointer from unqualified"), G_("initialization makes %q#v qualified " "function pointer from unqualified"), G_("return makes %q#v qualified function " "pointer from unqualified"), TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr)); } } /* Avoid warning about the volatile ObjC EH puts on decls. */ else if (!objc_ok) { switch (errtype) { case ic_argpass: if (pedwarn (expr_loc, OPT_Wincompatible_pointer_types, "passing argument %d of %qE from incompatible " "pointer type", parmnum, rname)) inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype); break; case ic_assign: pedwarn (location, OPT_Wincompatible_pointer_types, "assignment to %qT from incompatible pointer type %qT", type, rhstype); break; case ic_init: pedwarn_init (location, OPT_Wincompatible_pointer_types, "initialization of %qT from incompatible pointer " "type %qT", type, rhstype); break; case ic_return: pedwarn (location, OPT_Wincompatible_pointer_types, "returning %qT from a function with incompatible " "return type %qT", rhstype, type); break; default: gcc_unreachable (); } } return convert (type, rhs); } else if (codel == POINTER_TYPE && coder == ARRAY_TYPE) { /* ??? This should not be an error when inlining calls to unprototyped functions. */ error_at (location, "invalid use of non-lvalue array"); return error_mark_node; } else if (codel == POINTER_TYPE && coder == INTEGER_TYPE) { /* An explicit constant 0 can convert to a pointer, or one that results from arithmetic, even including a cast to integer type. 
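   That is (illustrative only):

       char *p = 0;           (OK: null pointer constant)
       char *q = (long) 0;    (also accepted: still an integer
                               constant expression with value zero)
       char *r = 1;           (pedwarn: makes pointer from integer
                               without a cast)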
*/ if (!null_pointer_constant) switch (errtype) { case ic_argpass: if (pedwarn (expr_loc, OPT_Wint_conversion, "passing argument %d of %qE makes pointer from " "integer without a cast", parmnum, rname)) inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype); break; case ic_assign: pedwarn (location, OPT_Wint_conversion, "assignment to %qT from %qT makes pointer from integer " "without a cast", type, rhstype); break; case ic_init: pedwarn_init (location, OPT_Wint_conversion, "initialization of %qT from %qT makes pointer from " "integer without a cast", type, rhstype); break; case ic_return: pedwarn (location, OPT_Wint_conversion, "returning %qT from a " "function with return type %qT makes pointer from " "integer without a cast", rhstype, type); break; default: gcc_unreachable (); } return convert (type, rhs); } else if (codel == INTEGER_TYPE && coder == POINTER_TYPE) { switch (errtype) { case ic_argpass: if (pedwarn (expr_loc, OPT_Wint_conversion, "passing argument %d of %qE makes integer from " "pointer without a cast", parmnum, rname)) inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype); break; case ic_assign: pedwarn (location, OPT_Wint_conversion, "assignment to %qT from %qT makes integer from pointer " "without a cast", type, rhstype); break; case ic_init: pedwarn_init (location, OPT_Wint_conversion, "initialization of %qT from %qT makes integer from " "pointer without a cast", type, rhstype); break; case ic_return: pedwarn (location, OPT_Wint_conversion, "returning %qT from a " "function with return type %qT makes integer from " "pointer without a cast", rhstype, type); break; default: gcc_unreachable (); } return convert (type, rhs); } else if (codel == BOOLEAN_TYPE && coder == POINTER_TYPE) { tree ret; bool save = in_late_binary_op; in_late_binary_op = true; ret = convert (type, rhs); in_late_binary_op = save; return ret; } switch (errtype) { case ic_argpass: error_at (expr_loc, "incompatible type for argument %d of %qE", parmnum, rname); inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype); break; case ic_assign: error_at (location, "incompatible types when assigning to type %qT from " "type %qT", type, rhstype); break; case ic_init: error_at (location, "incompatible types when initializing type %qT using type %qT", type, rhstype); break; case ic_return: error_at (location, "incompatible types when returning type %qT but %qT was " "expected", rhstype, type); break; default: gcc_unreachable (); } return error_mark_node; } /* If VALUE is a compound expr all of whose expressions are constant, then return its value. Otherwise, return error_mark_node. This is for handling COMPOUND_EXPRs as initializer elements which is allowed with a warning when -pedantic is specified. */ static tree valid_compound_expr_initializer (tree value, tree endtype) { if (TREE_CODE (value) == COMPOUND_EXPR) { if (valid_compound_expr_initializer (TREE_OPERAND (value, 0), endtype) == error_mark_node) return error_mark_node; return valid_compound_expr_initializer (TREE_OPERAND (value, 1), endtype); } else if (!initializer_constant_valid_p (value, endtype)) return error_mark_node; else return value; } /* Perform appropriate conversions on the initial value of a variable, store it in the declaration DECL, and print any error messages that are appropriate. If ORIGTYPE is not NULL_TREE, it is the original type of INIT. If the init is invalid, store an ERROR_MARK. INIT_LOC is the location of the initial value. 
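   As a special case handled below, an incomplete array type is
   completed from the type of a compound literal initializer, e.g.

       int a[] = (int [3]) { 1 };

   gives A the type int[3].  */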
*/ void store_init_value (location_t init_loc, tree decl, tree init, tree origtype) { tree value, type; bool npc = false; /* If variable's type was invalidly declared, just ignore it. */ type = TREE_TYPE (decl); if (TREE_CODE (type) == ERROR_MARK) return; /* Digest the specified initializer into an expression. */ if (init) npc = null_pointer_constant_p (init); value = digest_init (init_loc, type, init, origtype, npc, true, TREE_STATIC (decl)); /* Store the expression if valid; else report error. */ if (!in_system_header_at (input_location) && AGGREGATE_TYPE_P (TREE_TYPE (decl)) && !TREE_STATIC (decl)) warning (OPT_Wtraditional, "traditional C rejects automatic " "aggregate initialization"); if (value != error_mark_node || TREE_CODE (decl) != FUNCTION_DECL) DECL_INITIAL (decl) = value; /* ANSI wants warnings about out-of-range constant initializers. */ STRIP_TYPE_NOPS (value); if (TREE_STATIC (decl)) constant_expression_warning (value); /* Check if we need to set array size from compound literal size. */ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE && value != error_mark_node) { tree inside_init = init; STRIP_TYPE_NOPS (inside_init); inside_init = fold (inside_init); if (TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR) { tree cldecl = COMPOUND_LITERAL_EXPR_DECL (inside_init); if (TYPE_DOMAIN (TREE_TYPE (cldecl))) { /* For int foo[] = (int [3]){1}; we need to set array size now since later on array initializer will be just the brace enclosed list of the compound literal. */ tree etype = strip_array_types (TREE_TYPE (decl)); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_DOMAIN (type) = TYPE_DOMAIN (TREE_TYPE (cldecl)); layout_type (type); layout_decl (cldecl, 0); TREE_TYPE (decl) = c_build_qualified_type (type, TYPE_QUALS (etype)); } } } } /* Methods for storing and printing names for error messages. */ /* Implement a spelling stack that allows components of a name to be pushed and popped. Each element on the stack is this structure. */ struct spelling { int kind; union { unsigned HOST_WIDE_INT i; const char *s; } u; }; #define SPELLING_STRING 1 #define SPELLING_MEMBER 2 #define SPELLING_BOUNDS 3 static struct spelling *spelling; /* Next stack element (unused). */ static struct spelling *spelling_base; /* Spelling stack base. */ static int spelling_size; /* Size of the spelling stack. */ /* Macros to save and restore the spelling stack around push_... functions. Alternative to SAVE_SPELLING_STACK. */ #define SPELLING_DEPTH() (spelling - spelling_base) #define RESTORE_SPELLING_DEPTH(DEPTH) (spelling = spelling_base + (DEPTH)) /* Push an element on the spelling stack with type KIND and assign VALUE to MEMBER. */ #define PUSH_SPELLING(KIND, VALUE, MEMBER) \ { \ int depth = SPELLING_DEPTH (); \ \ if (depth >= spelling_size) \ { \ spelling_size += 10; \ spelling_base = XRESIZEVEC (struct spelling, spelling_base, \ spelling_size); \ RESTORE_SPELLING_DEPTH (depth); \ } \ \ spelling->kind = (KIND); \ spelling->MEMBER = (VALUE); \ spelling++; \ } /* Push STRING on the stack. Printed literally. */ static void push_string (const char *string) { PUSH_SPELLING (SPELLING_STRING, string, u.s); } /* Push a member name on the stack. Printed as '.' STRING. */ static void push_member_name (tree decl) { const char *const string = (DECL_NAME (decl) ? identifier_to_locale (IDENTIFIER_POINTER (DECL_NAME (decl))) : _("<anonymous>")); PUSH_SPELLING (SPELLING_MEMBER, string, u.s); } /* Push an array bounds on the stack. Printed as [BOUNDS]. 
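   Together with push_string and push_member_name this builds the
   spelling used in notes such as "(near initialization for %qs)";
   for instance a decl A, a member B and a bound 2 print as
   "a.b[2]".  */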
*/ static void push_array_bounds (unsigned HOST_WIDE_INT bounds) { PUSH_SPELLING (SPELLING_BOUNDS, bounds, u.i); } /* Compute the maximum size in bytes of the printed spelling. */ static int spelling_length (void) { int size = 0; struct spelling *p; for (p = spelling_base; p < spelling; p++) { if (p->kind == SPELLING_BOUNDS) size += 25; else size += strlen (p->u.s) + 1; } return size; } /* Print the spelling to BUFFER and return it. */ static char * print_spelling (char *buffer) { char *d = buffer; struct spelling *p; for (p = spelling_base; p < spelling; p++) if (p->kind == SPELLING_BOUNDS) { sprintf (d, "[" HOST_WIDE_INT_PRINT_UNSIGNED "]", p->u.i); d += strlen (d); } else { const char *s; if (p->kind == SPELLING_MEMBER) *d++ = '.'; for (s = p->u.s; (*d = *s++); d++) ; } *d++ = '\0'; return buffer; } /* Digest the parser output INIT as an initializer for type TYPE. Return a C expression of type TYPE to represent the initial value. If ORIGTYPE is not NULL_TREE, it is the original type of INIT. NULL_POINTER_CONSTANT is true if INIT is a null pointer constant. If INIT is a string constant, STRICT_STRING is true if it is unparenthesized or we should not warn here for it being parenthesized. For other types of INIT, STRICT_STRING is not used. INIT_LOC is the location of the INIT. REQUIRE_CONSTANT requests an error if non-constant initializers or elements are seen. */ static tree digest_init (location_t init_loc, tree type, tree init, tree origtype, bool null_pointer_constant, bool strict_string, int require_constant) { enum tree_code code = TREE_CODE (type); tree inside_init = init; tree semantic_type = NULL_TREE; bool maybe_const = true; if (type == error_mark_node || !init || error_operand_p (init)) return error_mark_node; STRIP_TYPE_NOPS (inside_init); if (TREE_CODE (inside_init) == EXCESS_PRECISION_EXPR) { semantic_type = TREE_TYPE (inside_init); inside_init = TREE_OPERAND (inside_init, 0); } inside_init = c_fully_fold (inside_init, require_constant, &maybe_const); /* Initialization of an array of chars from a string constant optionally enclosed in braces. */ if (code == ARRAY_TYPE && inside_init && TREE_CODE (inside_init) == STRING_CST) { tree typ1 = (TYPE_ATOMIC (TREE_TYPE (type)) ? c_build_qualified_type (TYPE_MAIN_VARIANT (TREE_TYPE (type)), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (TREE_TYPE (type))); /* Note that an array could be both an array of character type and an array of wchar_t if wchar_t is signed char or unsigned char. */ bool char_array = (typ1 == char_type_node || typ1 == signed_char_type_node || typ1 == unsigned_char_type_node); bool wchar_array = !!comptypes (typ1, wchar_type_node); bool char16_array = !!comptypes (typ1, char16_type_node); bool char32_array = !!comptypes (typ1, char32_type_node); if (char_array || wchar_array || char16_array || char32_array) { struct c_expr expr; tree typ2 = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init))); expr.value = inside_init; expr.original_code = (strict_string ? 
STRING_CST : ERROR_MARK);
          expr.original_type = NULL;
          maybe_warn_string_init (init_loc, type, expr);

          if (TYPE_DOMAIN (type) && !TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
            pedwarn_init (init_loc, OPT_Wpedantic,
                          "initialization of a flexible array member");

          if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
                         TYPE_MAIN_VARIANT (type)))
            return inside_init;

          if (char_array)
            {
              if (typ2 != char_type_node)
                {
                  error_init (init_loc, "char-array initialized from wide "
                              "string");
                  return error_mark_node;
                }
            }
          else
            {
              if (typ2 == char_type_node)
                {
                  error_init (init_loc, "wide character array initialized "
                              "from non-wide string");
                  return error_mark_node;
                }
              else if (!comptypes (typ1, typ2))
                {
                  error_init (init_loc, "wide character array initialized "
                              "from incompatible wide string");
                  return error_mark_node;
                }
            }

          TREE_TYPE (inside_init) = type;
          if (TYPE_DOMAIN (type) != NULL_TREE
              && TYPE_SIZE (type) != NULL_TREE
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              unsigned HOST_WIDE_INT len = TREE_STRING_LENGTH (inside_init);

              /* Subtract the size of a single (possibly wide) character
                 because it's ok to ignore the terminating null char
                 that is counted in the length of the constant.  */
              if (compare_tree_int (TYPE_SIZE_UNIT (type),
                                    (len - (TYPE_PRECISION (typ1)
                                            / BITS_PER_UNIT))) < 0)
                pedwarn_init (init_loc, 0,
                              ("initializer-string for array of chars "
                               "is too long"));
              else if (warn_cxx_compat
                       && compare_tree_int (TYPE_SIZE_UNIT (type), len) < 0)
                warning_at (init_loc, OPT_Wc___compat,
                            ("initializer-string for array of chars "
                             "is too long for C++"));
            }

          return inside_init;
        }
      else if (INTEGRAL_TYPE_P (typ1))
        {
          error_init (init_loc, "array of inappropriate type initialized "
                      "from string constant");
          return error_mark_node;
        }
    }

  /* Build a VECTOR_CST from a *constant* vector constructor.  If the
     vector constructor is not constant (e.g. {1,2,3,foo()}) then punt
     below and handle as a constructor.  */
  if (code == VECTOR_TYPE
      && VECTOR_TYPE_P (TREE_TYPE (inside_init))
      && vector_types_convertible_p (TREE_TYPE (inside_init), type, true)
      && TREE_CONSTANT (inside_init))
    {
      if (TREE_CODE (inside_init) == VECTOR_CST
          && comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
                        TYPE_MAIN_VARIANT (type)))
        return inside_init;

      if (TREE_CODE (inside_init) == CONSTRUCTOR)
        {
          unsigned HOST_WIDE_INT ix;
          tree value;
          bool constant_p = true;

          /* Iterate through elements and check if all constructor
             elements are *_CSTs.  */
          FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (inside_init),
                                      ix, value)
            if (!CONSTANT_CLASS_P (value))
              {
                constant_p = false;
                break;
              }

          if (constant_p)
            return build_vector_from_ctor (type,
                                           CONSTRUCTOR_ELTS (inside_init));
        }
    }

  if (warn_sequence_point)
    verify_sequence_points (inside_init);

  /* Any type can be initialized from an expression of the same type,
     optionally with braces.
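     For example (illustrative only):

         struct S { int i; };
         struct S a = { 1 };
         struct S b = a;      (same type, no braces)
         int n = { 5 };       (braces around a scalar initializer)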
*/
  if (inside_init && TREE_TYPE (inside_init) != NULL_TREE
      && (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
                     TYPE_MAIN_VARIANT (type))
          || (code == ARRAY_TYPE
              && comptypes (TREE_TYPE (inside_init), type))
          || (code == VECTOR_TYPE
              && comptypes (TREE_TYPE (inside_init), type))
          || (code == POINTER_TYPE
              && TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE
              && comptypes (TREE_TYPE (TREE_TYPE (inside_init)),
                            TREE_TYPE (type)))))
    {
      if (code == POINTER_TYPE)
        {
          if (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE)
            {
              if (TREE_CODE (inside_init) == STRING_CST
                  || TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
                inside_init = array_to_pointer_conversion (init_loc,
                                                           inside_init);
              else
                {
                  error_init (init_loc, "invalid use of non-lvalue array");
                  return error_mark_node;
                }
            }
        }

      if (code == VECTOR_TYPE)
        /* Although the types are compatible, we may require a
           conversion.  */
        inside_init = convert (type, inside_init);

      if (require_constant
          && TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
        {
          /* As an extension, allow initializing objects with static
             storage duration with compound literals (which are then
             treated just as the brace enclosed list they contain).
             Also allow this for vectors, as we can only assign them
             with compound literals.  */
          if (flag_isoc99 && code != VECTOR_TYPE)
            pedwarn_init (init_loc, OPT_Wpedantic, "initializer element "
                          "is not constant");
          tree decl = COMPOUND_LITERAL_EXPR_DECL (inside_init);
          inside_init = DECL_INITIAL (decl);
        }

      if (code == ARRAY_TYPE && TREE_CODE (inside_init) != STRING_CST
          && TREE_CODE (inside_init) != CONSTRUCTOR)
        {
          error_init (init_loc, "array initialized from non-constant array "
                      "expression");
          return error_mark_node;
        }

      /* Compound expressions can only occur here if -Wpedantic or
         -pedantic-errors is specified.  In the latter case, we always
         want an error.  In the former case, we simply want a
         warning.  */
      if (require_constant && pedantic
          && TREE_CODE (inside_init) == COMPOUND_EXPR)
        {
          inside_init
            = valid_compound_expr_initializer (inside_init,
                                               TREE_TYPE (inside_init));
          if (inside_init == error_mark_node)
            error_init (init_loc, "initializer element is not constant");
          else
            pedwarn_init (init_loc, OPT_Wpedantic,
                          "initializer element is not constant");
          if (flag_pedantic_errors)
            inside_init = error_mark_node;
        }
      else if (require_constant
               && !initializer_constant_valid_p (inside_init,
                                                 TREE_TYPE (inside_init)))
        {
          error_init (init_loc, "initializer element is not constant");
          inside_init = error_mark_node;
        }
      else if (require_constant && !maybe_const)
        pedwarn_init (init_loc, OPT_Wpedantic,
                      "initializer element is not a constant expression");

      /* Added to enable additional -Wsuggest-attribute=format
         warnings.  */
      if (TREE_CODE (TREE_TYPE (inside_init)) == POINTER_TYPE)
        inside_init = convert_for_assignment (init_loc, UNKNOWN_LOCATION,
                                              type, inside_init, origtype,
                                              ic_init, null_pointer_constant,
                                              NULL_TREE, NULL_TREE, 0);
      return inside_init;
    }

  /* Handle scalar types, including conversions.
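     e.g. (illustrative only)

         double d = 3;         (int constant converted to double)
         static float f = 1;   (converted too, but for a static object
                                the result must also pass the constant
                                initializer checks below)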
*/ if (code == INTEGER_TYPE || code == REAL_TYPE || code == FIXED_POINT_TYPE || code == POINTER_TYPE || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE || code == COMPLEX_TYPE || code == VECTOR_TYPE) { if (TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE && (TREE_CODE (init) == STRING_CST || TREE_CODE (init) == COMPOUND_LITERAL_EXPR)) inside_init = init = array_to_pointer_conversion (init_loc, init); if (semantic_type) inside_init = build1 (EXCESS_PRECISION_EXPR, semantic_type, inside_init); inside_init = convert_for_assignment (init_loc, UNKNOWN_LOCATION, type, inside_init, origtype, ic_init, null_pointer_constant, NULL_TREE, NULL_TREE, 0); /* Check to see if we have already given an error message. */ if (inside_init == error_mark_node) ; else if (require_constant && !TREE_CONSTANT (inside_init)) { error_init (init_loc, "initializer element is not constant"); inside_init = error_mark_node; } else if (require_constant && !initializer_constant_valid_p (inside_init, TREE_TYPE (inside_init))) { error_init (init_loc, "initializer element is not computable at " "load time"); inside_init = error_mark_node; } else if (require_constant && !maybe_const) pedwarn_init (init_loc, OPT_Wpedantic, "initializer element is not a constant expression"); return inside_init; } /* Come here only for records and arrays. */ if (COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) { error_init (init_loc, "variable-sized object may not be initialized"); return error_mark_node; } error_init (init_loc, "invalid initializer"); return error_mark_node; } /* Handle initializers that use braces. */ /* Type of object we are accumulating a constructor for. This type is always a RECORD_TYPE, UNION_TYPE or ARRAY_TYPE. */ static tree constructor_type; /* For a RECORD_TYPE or UNION_TYPE, this is the chain of fields left to fill. */ static tree constructor_fields; /* For an ARRAY_TYPE, this is the specified index at which to store the next element we get. */ static tree constructor_index; /* For an ARRAY_TYPE, this is the maximum index. */ static tree constructor_max_index; /* For a RECORD_TYPE, this is the first field not yet written out. */ static tree constructor_unfilled_fields; /* For an ARRAY_TYPE, this is the index of the first element not yet written out. */ static tree constructor_unfilled_index; /* In a RECORD_TYPE, the byte index of the next consecutive field. This is so we can generate gaps between fields, when appropriate. */ static tree constructor_bit_index; /* If we are saving up the elements rather than allocating them, this is the list of elements so far (in reverse order, most recent first). */ static vec<constructor_elt, va_gc> *constructor_elements; /* 1 if constructor should be incrementally stored into a constructor chain, 0 if all the elements should be kept in AVL tree. */ static int constructor_incremental; /* 1 if so far this constructor's elements are all compile-time constants. */ static int constructor_constant; /* 1 if so far this constructor's elements are all valid address constants. */ static int constructor_simple; /* 1 if this constructor has an element that cannot be part of a constant expression. */ static int constructor_nonconst; /* 1 if this constructor is erroneous so far. */ static int constructor_erroneous; /* 1 if this constructor is the universal zero initializer { 0 }. */ static int constructor_zeroinit; /* Structure for managing pending initializer elements, organized as an AVL tree. 
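   Pending elements arise from designators that jump around, e.g. in

       int a[4] = { [3] = 1, [0] = 2 };

   the element for index 3 is held here until output reaches it.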
*/ struct init_node { struct init_node *left, *right; struct init_node *parent; int balance; tree purpose; tree value; tree origtype; }; /* Tree of pending elements at this constructor level. These are elements encountered out of order which belong at places we haven't reached yet in actually writing the output. Will never hold tree nodes across GC runs. */ static struct init_node *constructor_pending_elts; /* The SPELLING_DEPTH of this constructor. */ static int constructor_depth; /* DECL node for which an initializer is being read. 0 means we are reading a constructor expression such as (struct foo) {...}. */ static tree constructor_decl; /* Nonzero if this is an initializer for a top-level decl. */ static int constructor_top_level; /* Nonzero if there were any member designators in this initializer. */ static int constructor_designated; /* Nesting depth of designator list. */ static int designator_depth; /* Nonzero if there were diagnosed errors in this designator list. */ static int designator_erroneous; /* This stack has a level for each implicit or explicit level of structuring in the initializer, including the outermost one. It saves the values of most of the variables above. */ struct constructor_range_stack; struct constructor_stack { struct constructor_stack *next; tree type; tree fields; tree index; tree max_index; tree unfilled_index; tree unfilled_fields; tree bit_index; vec<constructor_elt, va_gc> *elements; struct init_node *pending_elts; int offset; int depth; /* If value nonzero, this value should replace the entire constructor at this level. */ struct c_expr replacement_value; struct constructor_range_stack *range_stack; char constant; char simple; char nonconst; char implicit; char erroneous; char outer; char incremental; char designated; int designator_depth; }; static struct constructor_stack *constructor_stack; /* This stack represents designators from some range designator up to the last designator in the list. */ struct constructor_range_stack { struct constructor_range_stack *next, *prev; struct constructor_stack *stack; tree range_start; tree index; tree range_end; tree fields; }; static struct constructor_range_stack *constructor_range_stack; /* This stack records separate initializers that are nested. Nested initializers can't happen in ANSI C, but GNU C allows them in cases like { ... (struct foo) { ... } ... }. */ struct initializer_stack { struct initializer_stack *next; tree decl; struct constructor_stack *constructor_stack; struct constructor_range_stack *constructor_range_stack; vec<constructor_elt, va_gc> *elements; struct spelling *spelling; struct spelling *spelling_base; int spelling_size; char top_level; char require_constant_value; char require_constant_elements; rich_location *missing_brace_richloc; }; static struct initializer_stack *initializer_stack; /* Prepare to parse and output the initializer for variable DECL. 
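   DECL may also be null, e.g. while parsing a compound literal such
   as (struct foo) { 0 }, in which case no constant value is required
   and diagnostics use "(anonymous)" for the spelling.  TOP_LEVEL is
   nonzero for an initializer of a top-level decl; RICHLOC collects
   fix-it hints for missing braces.  */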
*/ void start_init (tree decl, tree asmspec_tree ATTRIBUTE_UNUSED, int top_level, rich_location *richloc) { const char *locus; struct initializer_stack *p = XNEW (struct initializer_stack); p->decl = constructor_decl; p->require_constant_value = require_constant_value; p->require_constant_elements = require_constant_elements; p->constructor_stack = constructor_stack; p->constructor_range_stack = constructor_range_stack; p->elements = constructor_elements; p->spelling = spelling; p->spelling_base = spelling_base; p->spelling_size = spelling_size; p->top_level = constructor_top_level; p->next = initializer_stack; p->missing_brace_richloc = richloc; initializer_stack = p; constructor_decl = decl; constructor_designated = 0; constructor_top_level = top_level; if (decl != NULL_TREE && decl != error_mark_node) { require_constant_value = TREE_STATIC (decl); require_constant_elements = ((TREE_STATIC (decl) || (pedantic && !flag_isoc99)) /* For a scalar, you can always use any value to initialize, even within braces. */ && AGGREGATE_TYPE_P (TREE_TYPE (decl))); locus = identifier_to_locale (IDENTIFIER_POINTER (DECL_NAME (decl))); } else { require_constant_value = 0; require_constant_elements = 0; locus = _("(anonymous)"); } constructor_stack = 0; constructor_range_stack = 0; found_missing_braces = 0; spelling_base = 0; spelling_size = 0; RESTORE_SPELLING_DEPTH (0); if (locus) push_string (locus); } void finish_init (void) { struct initializer_stack *p = initializer_stack; /* Free the whole constructor stack of this initializer. */ while (constructor_stack) { struct constructor_stack *q = constructor_stack; constructor_stack = q->next; free (q); } gcc_assert (!constructor_range_stack); /* Pop back to the data of the outer initializer (if any). */ free (spelling_base); constructor_decl = p->decl; require_constant_value = p->require_constant_value; require_constant_elements = p->require_constant_elements; constructor_stack = p->constructor_stack; constructor_range_stack = p->constructor_range_stack; constructor_elements = p->elements; spelling = p->spelling; spelling_base = p->spelling_base; spelling_size = p->spelling_size; constructor_top_level = p->top_level; initializer_stack = p->next; free (p); } /* Call here when we see the initializer is surrounded by braces. This is instead of a call to push_init_level; it is matched by a call to pop_init_level. TYPE is the type to initialize, for a constructor expression. For an initializer for a decl, TYPE is zero. 
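   For example, parsing

       int x = { 5 };

   arrives here with TYPE zero (the type comes from X's decl), and the
   scalar case at the bottom of this function then treats the whole
   type as the single "field" to fill.  */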
*/ void really_start_incremental_init (tree type) { struct constructor_stack *p = XNEW (struct constructor_stack); if (type == NULL_TREE) type = TREE_TYPE (constructor_decl); if (VECTOR_TYPE_P (type) && TYPE_VECTOR_OPAQUE (type)) error ("opaque vector types cannot be initialized"); p->type = constructor_type; p->fields = constructor_fields; p->index = constructor_index; p->max_index = constructor_max_index; p->unfilled_index = constructor_unfilled_index; p->unfilled_fields = constructor_unfilled_fields; p->bit_index = constructor_bit_index; p->elements = constructor_elements; p->constant = constructor_constant; p->simple = constructor_simple; p->nonconst = constructor_nonconst; p->erroneous = constructor_erroneous; p->pending_elts = constructor_pending_elts; p->depth = constructor_depth; p->replacement_value.value = 0; p->replacement_value.original_code = ERROR_MARK; p->replacement_value.original_type = NULL; p->implicit = 0; p->range_stack = 0; p->outer = 0; p->incremental = constructor_incremental; p->designated = constructor_designated; p->designator_depth = designator_depth; p->next = 0; constructor_stack = p; constructor_constant = 1; constructor_simple = 1; constructor_nonconst = 0; constructor_depth = SPELLING_DEPTH (); constructor_elements = NULL; constructor_pending_elts = 0; constructor_type = type; constructor_incremental = 1; constructor_designated = 0; constructor_zeroinit = 1; designator_depth = 0; designator_erroneous = 0; if (RECORD_OR_UNION_TYPE_P (constructor_type)) { constructor_fields = TYPE_FIELDS (constructor_type); /* Skip any nameless bit fields at the beginning. */ while (constructor_fields != NULL_TREE && DECL_UNNAMED_BIT_FIELD (constructor_fields)) constructor_fields = DECL_CHAIN (constructor_fields); constructor_unfilled_fields = constructor_fields; constructor_bit_index = bitsize_zero_node; } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (TYPE_DOMAIN (constructor_type)) { constructor_max_index = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)); /* Detect non-empty initializations of zero-length arrays. */ if (constructor_max_index == NULL_TREE && TYPE_SIZE (constructor_type)) constructor_max_index = integer_minus_one_node; /* constructor_max_index needs to be an INTEGER_CST. Attempts to initialize VLAs will cause a proper error; avoid tree checking errors as well by setting a safe value. */ if (constructor_max_index && TREE_CODE (constructor_max_index) != INTEGER_CST) constructor_max_index = integer_minus_one_node; constructor_index = convert (bitsizetype, TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type))); } else { constructor_index = bitsize_zero_node; constructor_max_index = NULL_TREE; } constructor_unfilled_index = constructor_index; } else if (VECTOR_TYPE_P (constructor_type)) { /* Vectors are like simple fixed-size arrays. */ constructor_max_index = bitsize_int (TYPE_VECTOR_SUBPARTS (constructor_type) - 1); constructor_index = bitsize_zero_node; constructor_unfilled_index = constructor_index; } else { /* Handle the case of int x = {5}; */ constructor_fields = constructor_type; constructor_unfilled_fields = constructor_type; } } extern location_t last_init_list_comma; /* Called when we see an open brace for a nested initializer. Finish off any pending levels with implicit braces. 
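   For example, in

       int m[2][2] = { 1, 2, { 3, 4 } };

   the implicitly opened level for m[0] is already full when the
   brace before 3 is seen, so it is popped here before the new level
   is pushed.  */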
*/ void finish_implicit_inits (location_t loc, struct obstack *braced_init_obstack) { while (constructor_stack->implicit) { if (RECORD_OR_UNION_TYPE_P (constructor_type) && constructor_fields == NULL_TREE) process_init_element (input_location, pop_init_level (loc, 1, braced_init_obstack, last_init_list_comma), true, braced_init_obstack); else if (TREE_CODE (constructor_type) == ARRAY_TYPE && constructor_max_index && tree_int_cst_lt (constructor_max_index, constructor_index)) process_init_element (input_location, pop_init_level (loc, 1, braced_init_obstack, last_init_list_comma), true, braced_init_obstack); else break; } } /* Push down into a subobject, for initialization. If this is for an explicit set of braces, IMPLICIT is 0. If it is because the next element belongs at a lower level, IMPLICIT is 1 (or 2 if the push is because of designator list). */ void push_init_level (location_t loc, int implicit, struct obstack *braced_init_obstack) { struct constructor_stack *p; tree value = NULL_TREE; /* Unless this is an explicit brace, we need to preserve previous content if any. */ if (implicit) { if (RECORD_OR_UNION_TYPE_P (constructor_type) && constructor_fields) value = find_init_member (constructor_fields, braced_init_obstack); else if (TREE_CODE (constructor_type) == ARRAY_TYPE) value = find_init_member (constructor_index, braced_init_obstack); } p = XNEW (struct constructor_stack); p->type = constructor_type; p->fields = constructor_fields; p->index = constructor_index; p->max_index = constructor_max_index; p->unfilled_index = constructor_unfilled_index; p->unfilled_fields = constructor_unfilled_fields; p->bit_index = constructor_bit_index; p->elements = constructor_elements; p->constant = constructor_constant; p->simple = constructor_simple; p->nonconst = constructor_nonconst; p->erroneous = constructor_erroneous; p->pending_elts = constructor_pending_elts; p->depth = constructor_depth; p->replacement_value.value = NULL_TREE; p->replacement_value.original_code = ERROR_MARK; p->replacement_value.original_type = NULL; p->implicit = implicit; p->outer = 0; p->incremental = constructor_incremental; p->designated = constructor_designated; p->designator_depth = designator_depth; p->next = constructor_stack; p->range_stack = 0; constructor_stack = p; constructor_constant = 1; constructor_simple = 1; constructor_nonconst = 0; constructor_depth = SPELLING_DEPTH (); constructor_elements = NULL; constructor_incremental = 1; constructor_designated = 0; constructor_pending_elts = 0; if (!implicit) { p->range_stack = constructor_range_stack; constructor_range_stack = 0; designator_depth = 0; designator_erroneous = 0; } /* Don't die if an entire brace-pair level is superfluous in the containing level. */ if (constructor_type == NULL_TREE) ; else if (RECORD_OR_UNION_TYPE_P (constructor_type)) { /* Don't die if there are extra init elts at the end. */ if (constructor_fields == NULL_TREE) constructor_type = NULL_TREE; else { constructor_type = TREE_TYPE (constructor_fields); push_member_name (constructor_fields); constructor_depth++; } /* If upper initializer is designated, then mark this as designated too to prevent bogus warnings. 
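E.g., given struct { struct { int x, y; } in; int z; } v = { .in = { 1 } }; the level for IN inherits the designated flag from its parent, so pop_init_level does not issue a -Wmissing-field-initializers warning for the unfilled member Y.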
*/ constructor_designated = p->designated; } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { constructor_type = TREE_TYPE (constructor_type); push_array_bounds (tree_to_uhwi (constructor_index)); constructor_depth++; } if (constructor_type == NULL_TREE) { error_init (loc, "extra brace group at end of initializer"); constructor_fields = NULL_TREE; constructor_unfilled_fields = NULL_TREE; return; } if (value && TREE_CODE (value) == CONSTRUCTOR) { constructor_constant = TREE_CONSTANT (value); constructor_simple = TREE_STATIC (value); constructor_nonconst = CONSTRUCTOR_NON_CONST (value); constructor_elements = CONSTRUCTOR_ELTS (value); if (!vec_safe_is_empty (constructor_elements) && (TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == ARRAY_TYPE)) set_nonincremental_init (braced_init_obstack); } if (implicit == 1) { found_missing_braces = 1; if (initializer_stack->missing_brace_richloc) initializer_stack->missing_brace_richloc->add_fixit_insert_before (loc, "{"); } if (RECORD_OR_UNION_TYPE_P (constructor_type)) { constructor_fields = TYPE_FIELDS (constructor_type); /* Skip any nameless bit fields at the beginning. */ while (constructor_fields != NULL_TREE && DECL_UNNAMED_BIT_FIELD (constructor_fields)) constructor_fields = DECL_CHAIN (constructor_fields); constructor_unfilled_fields = constructor_fields; constructor_bit_index = bitsize_zero_node; } else if (VECTOR_TYPE_P (constructor_type)) { /* Vectors are like simple fixed-size arrays. */ constructor_max_index = bitsize_int (TYPE_VECTOR_SUBPARTS (constructor_type) - 1); constructor_index = bitsize_int (0); constructor_unfilled_index = constructor_index; } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (TYPE_DOMAIN (constructor_type)) { constructor_max_index = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)); /* Detect non-empty initializations of zero-length arrays. */ if (constructor_max_index == NULL_TREE && TYPE_SIZE (constructor_type)) constructor_max_index = integer_minus_one_node; /* constructor_max_index needs to be an INTEGER_CST. Attempts to initialize VLAs will cause a proper error; avoid tree checking errors as well by setting a safe value. */ if (constructor_max_index && TREE_CODE (constructor_max_index) != INTEGER_CST) constructor_max_index = integer_minus_one_node; constructor_index = convert (bitsizetype, TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type))); } else constructor_index = bitsize_zero_node; constructor_unfilled_index = constructor_index; if (value && TREE_CODE (value) == STRING_CST) { /* We need to split the char/wchar array into individual characters, so that we don't have to special case it everywhere. */ set_nonincremental_init_from_string (value, braced_init_obstack); } } else { if (constructor_type != error_mark_node) warning_init (input_location, 0, "braces around scalar initializer"); constructor_fields = constructor_type; constructor_unfilled_fields = constructor_type; } } /* At the end of an implicit or explicit brace level, finish up that level of constructor. If a single expression with redundant braces initialized that level, return the c_expr structure for that expression. Otherwise, the original_code element is set to ERROR_MARK. If we were outputting the elements as they are read, return 0 as the value from inner levels (process_init_element ignores that), but return error_mark_node as the value from the outermost level (that's what we want to put in DECL_INITIAL). Otherwise, return a CONSTRUCTOR expression as the value. 
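E.g., for int i = { 5 }; the braces are redundant and the c_expr for 5 itself is passed out as the replacement value, rather than a CONSTRUCTOR.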
*/ struct c_expr pop_init_level (location_t loc, int implicit, struct obstack *braced_init_obstack, location_t insert_before) { struct constructor_stack *p; struct c_expr ret; ret.value = NULL_TREE; ret.original_code = ERROR_MARK; ret.original_type = NULL; if (implicit == 0) { /* When we come to an explicit close brace, pop any inner levels that didn't have explicit braces. */ while (constructor_stack->implicit) process_init_element (input_location, pop_init_level (loc, 1, braced_init_obstack, insert_before), true, braced_init_obstack); gcc_assert (!constructor_range_stack); } else if (initializer_stack->missing_brace_richloc) initializer_stack->missing_brace_richloc->add_fixit_insert_before (insert_before, "}"); /* Now output all pending elements. */ constructor_incremental = 1; output_pending_init_elements (1, braced_init_obstack); p = constructor_stack; /* Error for initializing a flexible array member, or a zero-length array member in an inappropriate context. */ if (constructor_type && constructor_fields && TREE_CODE (constructor_type) == ARRAY_TYPE && TYPE_DOMAIN (constructor_type) && !TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type))) { /* Silently discard empty initializations. The parser will already have pedwarned for empty brackets. */ if (integer_zerop (constructor_unfilled_index)) constructor_type = NULL_TREE; else { gcc_assert (!TYPE_SIZE (constructor_type)); if (constructor_depth > 2) error_init (loc, "initialization of flexible array member in a nested context"); else pedwarn_init (loc, OPT_Wpedantic, "initialization of a flexible array member"); /* We have already issued an error message for the existence of a flexible array member not at the end of the structure. Discard the initializer so that we do not die later. */ if (DECL_CHAIN (constructor_fields) != NULL_TREE) constructor_type = NULL_TREE; } } switch (vec_safe_length (constructor_elements)) { case 0: /* Initialization with { } counts as zeroinit. */ constructor_zeroinit = 1; break; case 1: /* This might be zeroinit as well. */ if (integer_zerop ((*constructor_elements)[0].value)) constructor_zeroinit = 1; break; default: /* If the constructor has more than one element, it can't be { 0 }. */ constructor_zeroinit = 0; break; } /* Warn when some structs are initialized with direct aggregation. */ if (!implicit && found_missing_braces && warn_missing_braces && !constructor_zeroinit) { gcc_assert (initializer_stack->missing_brace_richloc); warning_at (initializer_stack->missing_brace_richloc, OPT_Wmissing_braces, "missing braces around initializer"); } /* Warn when some struct elements are implicitly initialized to zero. */ if (warn_missing_field_initializers && constructor_type && TREE_CODE (constructor_type) == RECORD_TYPE && constructor_unfilled_fields) { /* Do not warn for flexible array members or zero-length arrays. */ while (constructor_unfilled_fields && (!DECL_SIZE (constructor_unfilled_fields) || integer_zerop (DECL_SIZE (constructor_unfilled_fields)))) constructor_unfilled_fields = DECL_CHAIN (constructor_unfilled_fields); if (constructor_unfilled_fields /* Do not warn if this level of the initializer uses member designators; it is likely to be deliberate. */ && !constructor_designated /* Do not warn about initializing with { 0 } or with { }. 
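E.g., struct { int a, b; } s = { 0 }; is the conventional way to zero-initialize the whole object, so the unfilled member B is not warned about, whereas { 1 } would warn.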
*/ && !constructor_zeroinit) { if (warning_at (input_location, OPT_Wmissing_field_initializers, "missing initializer for field %qD of %qT", constructor_unfilled_fields, constructor_type)) inform (DECL_SOURCE_LOCATION (constructor_unfilled_fields), "%qD declared here", constructor_unfilled_fields); } } /* Pad out the end of the structure. */ if (p->replacement_value.value) /* If this closes a superfluous brace pair, just pass out the element between them. */ ret = p->replacement_value; else if (constructor_type == NULL_TREE) ; else if (!RECORD_OR_UNION_TYPE_P (constructor_type) && TREE_CODE (constructor_type) != ARRAY_TYPE && !VECTOR_TYPE_P (constructor_type)) { /* A nonincremental scalar initializer--just return the element, after verifying there is just one. */ if (vec_safe_is_empty (constructor_elements)) { if (!constructor_erroneous) error_init (loc, "empty scalar initializer"); ret.value = error_mark_node; } else if (vec_safe_length (constructor_elements) != 1) { error_init (loc, "extra elements in scalar initializer"); ret.value = (*constructor_elements)[0].value; } else ret.value = (*constructor_elements)[0].value; } else { if (constructor_erroneous) ret.value = error_mark_node; else { ret.value = build_constructor (constructor_type, constructor_elements); if (constructor_constant) TREE_CONSTANT (ret.value) = 1; if (constructor_constant && constructor_simple) TREE_STATIC (ret.value) = 1; if (constructor_nonconst) CONSTRUCTOR_NON_CONST (ret.value) = 1; } } if (ret.value && TREE_CODE (ret.value) != CONSTRUCTOR) { if (constructor_nonconst) ret.original_code = C_MAYBE_CONST_EXPR; else if (ret.original_code == C_MAYBE_CONST_EXPR) ret.original_code = ERROR_MARK; } constructor_type = p->type; constructor_fields = p->fields; constructor_index = p->index; constructor_max_index = p->max_index; constructor_unfilled_index = p->unfilled_index; constructor_unfilled_fields = p->unfilled_fields; constructor_bit_index = p->bit_index; constructor_elements = p->elements; constructor_constant = p->constant; constructor_simple = p->simple; constructor_nonconst = p->nonconst; constructor_erroneous = p->erroneous; constructor_incremental = p->incremental; constructor_designated = p->designated; designator_depth = p->designator_depth; constructor_pending_elts = p->pending_elts; constructor_depth = p->depth; if (!p->implicit) constructor_range_stack = p->range_stack; RESTORE_SPELLING_DEPTH (constructor_depth); constructor_stack = p->next; free (p); if (ret.value == NULL_TREE && constructor_stack == 0) ret.value = error_mark_node; return ret; } /* Common handling for both array range and field name designators. ARRAY argument is nonzero for array ranges. Returns false for success. */ static bool set_designator (location_t loc, bool array, struct obstack *braced_init_obstack) { tree subtype; enum tree_code subcode; /* Don't die if an entire brace-pair level is superfluous in the containing level. */ if (constructor_type == NULL_TREE) return true; /* If there were errors in this designator list already, bail out silently. */ if (designator_erroneous) return true; if (!designator_depth) { gcc_assert (!constructor_range_stack); /* Designator list starts at the level of closest explicit braces. 
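E.g., in int a[2][2] = { 1, 2, [1][0] = 3 }; the values 1 and 2 are being collected in an implicit level for a[0]; the designator [1] applies at the level of the explicit braces, so that implicit level is popped first.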
*/ while (constructor_stack->implicit) process_init_element (input_location, pop_init_level (loc, 1, braced_init_obstack, last_init_list_comma), true, braced_init_obstack); constructor_designated = 1; return false; } switch (TREE_CODE (constructor_type)) { case RECORD_TYPE: case UNION_TYPE: subtype = TREE_TYPE (constructor_fields); if (subtype != error_mark_node) subtype = TYPE_MAIN_VARIANT (subtype); break; case ARRAY_TYPE: subtype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type)); break; default: gcc_unreachable (); } subcode = TREE_CODE (subtype); if (array && subcode != ARRAY_TYPE) { error_init (loc, "array index in non-array initializer"); return true; } else if (!array && subcode != RECORD_TYPE && subcode != UNION_TYPE) { error_init (loc, "field name not in record or union initializer"); return true; } constructor_designated = 1; finish_implicit_inits (loc, braced_init_obstack); push_init_level (loc, 2, braced_init_obstack); return false; } /* If there are range designators in designator list, push a new designator to constructor_range_stack. RANGE_END is end of such stack range or NULL_TREE if there is no range designator at this level. */ static void push_range_stack (tree range_end, struct obstack * braced_init_obstack) { struct constructor_range_stack *p; p = (struct constructor_range_stack *) obstack_alloc (braced_init_obstack, sizeof (struct constructor_range_stack)); p->prev = constructor_range_stack; p->next = 0; p->fields = constructor_fields; p->range_start = constructor_index; p->index = constructor_index; p->stack = constructor_stack; p->range_end = range_end; if (constructor_range_stack) constructor_range_stack->next = p; constructor_range_stack = p; } /* Within an array initializer, specify the next index to be initialized. FIRST is that index. If LAST is nonzero, then initialize a range of indices, running from FIRST through LAST. 
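E.g., int a[6] = { [2] = 1 }; arrives here with FIRST == 2 and LAST == NULL_TREE, while the GNU range extension int a[6] = { [2 ... 4] = 1 }; arrives with FIRST == 2 and LAST == 4.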
*/ void set_init_index (location_t loc, tree first, tree last, struct obstack *braced_init_obstack) { if (set_designator (loc, true, braced_init_obstack)) return; designator_erroneous = 1; if (!INTEGRAL_TYPE_P (TREE_TYPE (first)) || (last && !INTEGRAL_TYPE_P (TREE_TYPE (last)))) { error_init (loc, "array index in initializer not of integer type"); return; } if (TREE_CODE (first) != INTEGER_CST) { first = c_fully_fold (first, false, NULL); if (TREE_CODE (first) == INTEGER_CST) pedwarn_init (loc, OPT_Wpedantic, "array index in initializer is not " "an integer constant expression"); } if (last && TREE_CODE (last) != INTEGER_CST) { last = c_fully_fold (last, false, NULL); if (TREE_CODE (last) == INTEGER_CST) pedwarn_init (loc, OPT_Wpedantic, "array index in initializer is not " "an integer constant expression"); } if (TREE_CODE (first) != INTEGER_CST) error_init (loc, "nonconstant array index in initializer"); else if (last != NULL_TREE && TREE_CODE (last) != INTEGER_CST) error_init (loc, "nonconstant array index in initializer"); else if (TREE_CODE (constructor_type) != ARRAY_TYPE) error_init (loc, "array index in non-array initializer"); else if (tree_int_cst_sgn (first) == -1) error_init (loc, "array index in initializer exceeds array bounds"); else if (constructor_max_index && tree_int_cst_lt (constructor_max_index, first)) error_init (loc, "array index in initializer exceeds array bounds"); else { constant_expression_warning (first); if (last) constant_expression_warning (last); constructor_index = convert (bitsizetype, first); if (tree_int_cst_lt (constructor_index, first)) { constructor_index = copy_node (constructor_index); TREE_OVERFLOW (constructor_index) = 1; } if (last) { if (tree_int_cst_equal (first, last)) last = NULL_TREE; else if (tree_int_cst_lt (last, first)) { error_init (loc, "empty index range in initializer"); last = NULL_TREE; } else { last = convert (bitsizetype, last); if (constructor_max_index != NULL_TREE && tree_int_cst_lt (constructor_max_index, last)) { error_init (loc, "array index range in initializer exceeds " "array bounds"); last = NULL_TREE; } } } designator_depth++; designator_erroneous = 0; if (constructor_range_stack || last) push_range_stack (last, braced_init_obstack); } } /* Within a struct initializer, specify the next field to be initialized. */ void set_init_label (location_t loc, tree fieldname, location_t fieldname_loc, struct obstack *braced_init_obstack) { tree field; if (set_designator (loc, false, braced_init_obstack)) return; designator_erroneous = 1; if (!RECORD_OR_UNION_TYPE_P (constructor_type)) { error_init (loc, "field name not in record or union initializer"); return; } field = lookup_field (constructor_type, fieldname); if (field == NULL_TREE) { tree guessed_id = lookup_field_fuzzy (constructor_type, fieldname); if (guessed_id) { gcc_rich_location rich_loc (fieldname_loc); rich_loc.add_fixit_misspelled_id (fieldname_loc, guessed_id); error_at (&rich_loc, "%qT has no member named %qE; did you mean %qE?", constructor_type, fieldname, guessed_id); } else error_at (fieldname_loc, "%qT has no member named %qE", constructor_type, fieldname); } else do { constructor_fields = TREE_VALUE (field); designator_depth++; designator_erroneous = 0; if (constructor_range_stack) push_range_stack (NULL_TREE, braced_init_obstack); field = TREE_CHAIN (field); if (field) { if (set_designator (loc, false, braced_init_obstack)) return; } } while (field != NULL_TREE); } /* Add a new initializer to the tree of pending initializers. 
PURPOSE identifies the initializer, either array index or field in a structure. VALUE is the value of that index or field. If ORIGTYPE is not NULL_TREE, it is the original type of VALUE. IMPLICIT is true if value comes from pop_init_level (1), the new initializer has been merged with the existing one and thus no warnings should be emitted about overriding an existing initializer. */ static void add_pending_init (location_t loc, tree purpose, tree value, tree origtype, bool implicit, struct obstack *braced_init_obstack) { struct init_node *p, **q, *r; q = &constructor_pending_elts; p = 0; if (TREE_CODE (constructor_type) == ARRAY_TYPE) { while (*q != 0) { p = *q; if (tree_int_cst_lt (purpose, p->purpose)) q = &p->left; else if (tree_int_cst_lt (p->purpose, purpose)) q = &p->right; else { if (!implicit) { if (TREE_SIDE_EFFECTS (p->value)) warning_init (loc, OPT_Woverride_init_side_effects, "initialized field with side-effects " "overwritten"); else if (warn_override_init) warning_init (loc, OPT_Woverride_init, "initialized field overwritten"); } p->value = value; p->origtype = origtype; return; } } } else { tree bitpos; bitpos = bit_position (purpose); while (*q != NULL) { p = *q; if (tree_int_cst_lt (bitpos, bit_position (p->purpose))) q = &p->left; else if (p->purpose != purpose) q = &p->right; else { if (!implicit) { if (TREE_SIDE_EFFECTS (p->value)) warning_init (loc, OPT_Woverride_init_side_effects, "initialized field with side-effects " "overwritten"); else if (warn_override_init) warning_init (loc, OPT_Woverride_init, "initialized field overwritten"); } p->value = value; p->origtype = origtype; return; } } } r = (struct init_node *) obstack_alloc (braced_init_obstack, sizeof (struct init_node)); r->purpose = purpose; r->value = value; r->origtype = origtype; *q = r; r->parent = p; r->left = 0; r->right = 0; r->balance = 0; while (p) { struct init_node *s; if (r == p->left) { if (p->balance == 0) p->balance = -1; else if (p->balance < 0) { if (r->balance < 0) { /* L rotation. */ p->left = r->right; if (p->left) p->left->parent = p; r->right = p; p->balance = 0; r->balance = 0; s = p->parent; p->parent = r; r->parent = s; if (s) { if (s->left == p) s->left = r; else s->right = r; } else constructor_pending_elts = r; } else { /* LR rotation. */ struct init_node *t = r->right; r->right = t->left; if (r->right) r->right->parent = r; t->left = r; p->left = t->right; if (p->left) p->left->parent = p; t->right = p; p->balance = t->balance < 0; r->balance = -(t->balance > 0); t->balance = 0; s = p->parent; p->parent = t; r->parent = t; t->parent = s; if (s) { if (s->left == p) s->left = t; else s->right = t; } else constructor_pending_elts = t; } break; } else { /* p->balance == +1; growth of left side balances the node. */ p->balance = 0; break; } } else /* r == p->right */ { if (p->balance == 0) /* Growth propagation from right side. */ p->balance++; else if (p->balance > 0) { if (r->balance > 0) { /* R rotation. 
*/ p->right = r->left; if (p->right) p->right->parent = p; r->left = p; p->balance = 0; r->balance = 0; s = p->parent; p->parent = r; r->parent = s; if (s) { if (s->left == p) s->left = r; else s->right = r; } else constructor_pending_elts = r; } else /* r->balance == -1 */ { /* RL rotation */ struct init_node *t = r->left; r->left = t->right; if (r->left) r->left->parent = r; t->right = r; p->right = t->left; if (p->right) p->right->parent = p; t->left = p; r->balance = (t->balance < 0); p->balance = -(t->balance > 0); t->balance = 0; s = p->parent; p->parent = t; r->parent = t; t->parent = s; if (s) { if (s->left == p) s->left = t; else s->right = t; } else constructor_pending_elts = t; } break; } else { /* p->balance == -1; growth of right side balances the node. */ p->balance = 0; break; } } r = p; p = p->parent; } } /* Build AVL tree from a sorted chain. */ static void set_nonincremental_init (struct obstack * braced_init_obstack) { unsigned HOST_WIDE_INT ix; tree index, value; if (TREE_CODE (constructor_type) != RECORD_TYPE && TREE_CODE (constructor_type) != ARRAY_TYPE) return; FOR_EACH_CONSTRUCTOR_ELT (constructor_elements, ix, index, value) add_pending_init (input_location, index, value, NULL_TREE, true, braced_init_obstack); constructor_elements = NULL; if (TREE_CODE (constructor_type) == RECORD_TYPE) { constructor_unfilled_fields = TYPE_FIELDS (constructor_type); /* Skip any nameless bit fields at the beginning. */ while (constructor_unfilled_fields != NULL_TREE && DECL_UNNAMED_BIT_FIELD (constructor_unfilled_fields)) constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields); } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (TYPE_DOMAIN (constructor_type)) constructor_unfilled_index = convert (bitsizetype, TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type))); else constructor_unfilled_index = bitsize_zero_node; } constructor_incremental = 0; } /* Build AVL tree from a string constant. 
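E.g., in struct { char s[4]; } v = { .s = "abc", .s[0] = 'X' }; the earlier string is split into per-character pending elements (up to the array bound, including the terminating NUL) so that the later designator can override a single character.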
*/ static void set_nonincremental_init_from_string (tree str, struct obstack * braced_init_obstack) { tree value, purpose, type; HOST_WIDE_INT val[2]; const char *p, *end; int byte, wchar_bytes, charwidth, bitpos; gcc_assert (TREE_CODE (constructor_type) == ARRAY_TYPE); wchar_bytes = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (str))) / BITS_PER_UNIT; charwidth = TYPE_PRECISION (char_type_node); gcc_assert ((size_t) wchar_bytes * charwidth <= ARRAY_SIZE (val) * HOST_BITS_PER_WIDE_INT); type = TREE_TYPE (constructor_type); p = TREE_STRING_POINTER (str); end = p + TREE_STRING_LENGTH (str); for (purpose = bitsize_zero_node; p < end && !(constructor_max_index && tree_int_cst_lt (constructor_max_index, purpose)); purpose = size_binop (PLUS_EXPR, purpose, bitsize_one_node)) { if (wchar_bytes == 1) { val[0] = (unsigned char) *p++; val[1] = 0; } else { val[1] = 0; val[0] = 0; for (byte = 0; byte < wchar_bytes; byte++) { if (BYTES_BIG_ENDIAN) bitpos = (wchar_bytes - byte - 1) * charwidth; else bitpos = byte * charwidth; val[bitpos / HOST_BITS_PER_WIDE_INT] |= ((unsigned HOST_WIDE_INT) ((unsigned char) *p++)) << (bitpos % HOST_BITS_PER_WIDE_INT); } } if (!TYPE_UNSIGNED (type)) { bitpos = ((wchar_bytes - 1) * charwidth) + HOST_BITS_PER_CHAR; if (bitpos < HOST_BITS_PER_WIDE_INT) { if (val[0] & (HOST_WIDE_INT_1 << (bitpos - 1))) { val[0] |= HOST_WIDE_INT_M1U << bitpos; val[1] = -1; } } else if (bitpos == HOST_BITS_PER_WIDE_INT) { if (val[0] < 0) val[1] = -1; } else if (val[1] & (HOST_WIDE_INT_1 << (bitpos - 1 - HOST_BITS_PER_WIDE_INT))) val[1] |= HOST_WIDE_INT_M1U << (bitpos - HOST_BITS_PER_WIDE_INT); } value = wide_int_to_tree (type, wide_int::from_array (val, 2, HOST_BITS_PER_WIDE_INT * 2)); add_pending_init (input_location, purpose, value, NULL_TREE, true, braced_init_obstack); } constructor_incremental = 0; } /* Return value of FIELD in pending initializer or NULL_TREE if the field was not initialized yet. */ static tree find_init_member (tree field, struct obstack * braced_init_obstack) { struct init_node *p; if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (constructor_incremental && tree_int_cst_lt (field, constructor_unfilled_index)) set_nonincremental_init (braced_init_obstack); p = constructor_pending_elts; while (p) { if (tree_int_cst_lt (field, p->purpose)) p = p->left; else if (tree_int_cst_lt (p->purpose, field)) p = p->right; else return p->value; } } else if (TREE_CODE (constructor_type) == RECORD_TYPE) { tree bitpos = bit_position (field); if (constructor_incremental && (!constructor_unfilled_fields || tree_int_cst_lt (bitpos, bit_position (constructor_unfilled_fields)))) set_nonincremental_init (braced_init_obstack); p = constructor_pending_elts; while (p) { if (field == p->purpose) return p->value; else if (tree_int_cst_lt (bitpos, bit_position (p->purpose))) p = p->left; else p = p->right; } } else if (TREE_CODE (constructor_type) == UNION_TYPE) { if (!vec_safe_is_empty (constructor_elements) && (constructor_elements->last ().index == field)) return constructor_elements->last ().value; } return NULL_TREE; } /* "Output" the next constructor element. At top level, really output it to assembler code now. Otherwise, collect it in a list from which we will make a CONSTRUCTOR. If ORIGTYPE is not NULL_TREE, it is the original type of VALUE. TYPE is the data type that the containing data type wants here. FIELD is the field (a FIELD_DECL) or the index that this element fills. 
If VALUE is a string constant, STRICT_STRING is true if it is unparenthesized or we should not warn here for it being parenthesized. For other types of VALUE, STRICT_STRING is not used. PENDING if true means output pending elements that belong right after this element. (PENDING is normally true; it is false while outputting pending elements, to avoid recursion.) IMPLICIT is true if value comes from pop_init_level (1), the new initializer has been merged with the existing one and thus no warnings should be emitted about overriding an existing initializer. */ static void output_init_element (location_t loc, tree value, tree origtype, bool strict_string, tree type, tree field, bool pending, bool implicit, struct obstack * braced_init_obstack) { tree semantic_type = NULL_TREE; bool maybe_const = true; bool npc; if (type == error_mark_node || value == error_mark_node) { constructor_erroneous = 1; return; } if (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE && (TREE_CODE (value) == STRING_CST || TREE_CODE (value) == COMPOUND_LITERAL_EXPR) && !(TREE_CODE (value) == STRING_CST && TREE_CODE (type) == ARRAY_TYPE && INTEGRAL_TYPE_P (TREE_TYPE (type))) && !comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (value)), TYPE_MAIN_VARIANT (type))) value = array_to_pointer_conversion (input_location, value); if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR && require_constant_value && pending) { /* As an extension, allow initializing objects with static storage duration with compound literals (which are then treated just as the brace enclosed list they contain). */ if (flag_isoc99) pedwarn_init (loc, OPT_Wpedantic, "initializer element is not " "constant"); tree decl = COMPOUND_LITERAL_EXPR_DECL (value); value = DECL_INITIAL (decl); } npc = null_pointer_constant_p (value); if (TREE_CODE (value) == EXCESS_PRECISION_EXPR) { semantic_type = TREE_TYPE (value); value = TREE_OPERAND (value, 0); } value = c_fully_fold (value, require_constant_value, &maybe_const); if (value == error_mark_node) constructor_erroneous = 1; else if (!TREE_CONSTANT (value)) constructor_constant = 0; else if (!initializer_constant_valid_p (value, TREE_TYPE (value), AGGREGATE_TYPE_P (constructor_type) && TYPE_REVERSE_STORAGE_ORDER (constructor_type)) || (RECORD_OR_UNION_TYPE_P (constructor_type) && DECL_C_BIT_FIELD (field) && TREE_CODE (value) != INTEGER_CST)) constructor_simple = 0; if (!maybe_const) constructor_nonconst = 1; /* Digest the initializer and issue any errors about incompatible types before issuing errors about non-constant initializers. */ tree new_value = value; if (semantic_type) new_value = build1 (EXCESS_PRECISION_EXPR, semantic_type, value); new_value = digest_init (loc, type, new_value, origtype, npc, strict_string, require_constant_value); if (new_value == error_mark_node) { constructor_erroneous = 1; return; } if (require_constant_value || require_constant_elements) constant_expression_warning (new_value); /* Proceed to check the constness of the original initializer. */ if (!initializer_constant_valid_p (value, TREE_TYPE (value))) { if (require_constant_value) { error_init (loc, "initializer element is not constant"); value = error_mark_node; } else if (require_constant_elements) pedwarn (loc, OPT_Wpedantic, "initializer element is not computable at load time"); } else if (!maybe_const && (require_constant_value || require_constant_elements)) pedwarn_init (loc, OPT_Wpedantic, "initializer element is not a constant expression"); /* Issue -Wc++-compat warnings about initializing a bitfield with enum type. 
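E.g., enum e { A, B }; struct { enum e f : 2; } s = { 0 }; initializes the bit-field from an int, an implicit int-to-enum conversion that C++ rejects, hence the warning.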
*/ if (warn_cxx_compat && field != NULL_TREE && TREE_CODE (field) == FIELD_DECL && DECL_BIT_FIELD_TYPE (field) != NULL_TREE && (TYPE_MAIN_VARIANT (DECL_BIT_FIELD_TYPE (field)) != TYPE_MAIN_VARIANT (type)) && TREE_CODE (DECL_BIT_FIELD_TYPE (field)) == ENUMERAL_TYPE) { tree checktype = origtype != NULL_TREE ? origtype : TREE_TYPE (value); if (checktype != error_mark_node && (TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (DECL_BIT_FIELD_TYPE (field)))) warning_init (loc, OPT_Wc___compat, "enum conversion in initialization is invalid in C++"); } /* If this field is empty and does not have side effects (and is not at the end of structure), don't do anything other than checking the initializer. */ if (field && (TREE_TYPE (field) == error_mark_node || (COMPLETE_TYPE_P (TREE_TYPE (field)) && integer_zerop (TYPE_SIZE (TREE_TYPE (field))) && !TREE_SIDE_EFFECTS (new_value) && (TREE_CODE (constructor_type) == ARRAY_TYPE || DECL_CHAIN (field))))) return; /* Finally, set VALUE to the initializer value digested above. */ value = new_value; /* If this element doesn't come next in sequence, put it on constructor_pending_elts. */ if (TREE_CODE (constructor_type) == ARRAY_TYPE && (!constructor_incremental || !tree_int_cst_equal (field, constructor_unfilled_index))) { if (constructor_incremental && tree_int_cst_lt (field, constructor_unfilled_index)) set_nonincremental_init (braced_init_obstack); add_pending_init (loc, field, value, origtype, implicit, braced_init_obstack); return; } else if (TREE_CODE (constructor_type) == RECORD_TYPE && (!constructor_incremental || field != constructor_unfilled_fields)) { /* We do this for records but not for unions. In a union, no matter which field is specified, it can be initialized right away since it starts at the beginning of the union. */ if (constructor_incremental) { if (!constructor_unfilled_fields) set_nonincremental_init (braced_init_obstack); else { tree bitpos, unfillpos; bitpos = bit_position (field); unfillpos = bit_position (constructor_unfilled_fields); if (tree_int_cst_lt (bitpos, unfillpos)) set_nonincremental_init (braced_init_obstack); } } add_pending_init (loc, field, value, origtype, implicit, braced_init_obstack); return; } else if (TREE_CODE (constructor_type) == UNION_TYPE && !vec_safe_is_empty (constructor_elements)) { if (!implicit) { if (TREE_SIDE_EFFECTS (constructor_elements->last ().value)) warning_init (loc, OPT_Woverride_init_side_effects, "initialized field with side-effects overwritten"); else if (warn_override_init) warning_init (loc, OPT_Woverride_init, "initialized field overwritten"); } /* We can have just one union field set. */ constructor_elements = NULL; } /* Otherwise, output this element either to constructor_elements or to the assembler file. */ constructor_elt celt = {field, value}; vec_safe_push (constructor_elements, celt); /* Advance the variable that indicates sequential elements output. */ if (TREE_CODE (constructor_type) == ARRAY_TYPE) constructor_unfilled_index = size_binop_loc (input_location, PLUS_EXPR, constructor_unfilled_index, bitsize_one_node); else if (TREE_CODE (constructor_type) == RECORD_TYPE) { constructor_unfilled_fields = DECL_CHAIN (constructor_unfilled_fields); /* Skip any nameless bit fields. 
*/ while (constructor_unfilled_fields != NULL_TREE && DECL_UNNAMED_BIT_FIELD (constructor_unfilled_fields)) constructor_unfilled_fields = DECL_CHAIN (constructor_unfilled_fields); } else if (TREE_CODE (constructor_type) == UNION_TYPE) constructor_unfilled_fields = NULL_TREE; /* Now output any pending elements which have become next. */ if (pending) output_pending_init_elements (0, braced_init_obstack); } /* For two FIELD_DECLs in the same chain, return -1 if field1 comes before field2, 1 if field1 comes after field2 and 0 if field1 == field2. */ static int init_field_decl_cmp (tree field1, tree field2) { if (field1 == field2) return 0; tree bitpos1 = bit_position (field1); tree bitpos2 = bit_position (field2); if (tree_int_cst_equal (bitpos1, bitpos2)) { /* If one of the fields has non-zero bitsize, then that field must be the last one in a sequence of zero sized fields, fields after it will have bigger bit_position. */ if (TREE_TYPE (field1) != error_mark_node && COMPLETE_TYPE_P (TREE_TYPE (field1)) && integer_nonzerop (TYPE_SIZE (TREE_TYPE (field1)))) return 1; if (TREE_TYPE (field2) != error_mark_node && COMPLETE_TYPE_P (TREE_TYPE (field2)) && integer_nonzerop (TYPE_SIZE (TREE_TYPE (field2)))) return -1; /* Otherwise, fall back to a DECL_CHAIN walk to find out which field comes earlier. Walk chains of both fields, so that if field1 and field2 are close to each other in either order, it is found soon even for large sequences of zero sized fields. */ tree f1 = field1, f2 = field2; while (1) { f1 = DECL_CHAIN (f1); f2 = DECL_CHAIN (f2); if (f1 == NULL_TREE) { gcc_assert (f2); return 1; } if (f2 == NULL_TREE) return -1; if (f1 == field2) return -1; if (f2 == field1) return 1; if (!tree_int_cst_equal (bit_position (f1), bitpos1)) return 1; if (!tree_int_cst_equal (bit_position (f2), bitpos1)) return -1; } } else if (tree_int_cst_lt (bitpos1, bitpos2)) return -1; else return 1; } /* Output any pending elements which have become next. As we output elements, constructor_unfilled_{fields,index} advances, which may cause other elements to become next; if so, they too are output. If ALL is 0, we return when there are no more pending elements to output now. If ALL is 1, we output space as necessary so that we can output all the pending elements. */ static void output_pending_init_elements (int all, struct obstack * braced_init_obstack) { struct init_node *elt = constructor_pending_elts; tree next; retry: /* Look through the whole pending tree. If we find an element that should be output now, output it. Otherwise, set NEXT to the element that comes first among those still pending. */ next = NULL_TREE; while (elt) { if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (tree_int_cst_equal (elt->purpose, constructor_unfilled_index)) output_init_element (input_location, elt->value, elt->origtype, true, TREE_TYPE (constructor_type), constructor_unfilled_index, false, false, braced_init_obstack); else if (tree_int_cst_lt (constructor_unfilled_index, elt->purpose)) { /* Advance to the next smaller node. */ if (elt->left) elt = elt->left; else { /* We have reached the smallest node bigger than the current unfilled index. Fill the space first. */ next = elt->purpose; break; } } else { /* Advance to the next bigger node. */ if (elt->right) elt = elt->right; else { /* We have reached the biggest node in a subtree. Find the parent of it, which is the next bigger node. 
*/ while (elt->parent && elt->parent->right == elt) elt = elt->parent; elt = elt->parent; if (elt && tree_int_cst_lt (constructor_unfilled_index, elt->purpose)) { next = elt->purpose; break; } } } } else if (RECORD_OR_UNION_TYPE_P (constructor_type)) { /* If the current record is complete we are done. */ if (constructor_unfilled_fields == NULL_TREE) break; int cmp = init_field_decl_cmp (constructor_unfilled_fields, elt->purpose); if (cmp == 0) output_init_element (input_location, elt->value, elt->origtype, true, TREE_TYPE (elt->purpose), elt->purpose, false, false, braced_init_obstack); else if (cmp < 0) { /* Advance to the next smaller node. */ if (elt->left) elt = elt->left; else { /* We have reached the smallest node bigger than the current unfilled field. Fill the space first. */ next = elt->purpose; break; } } else { /* Advance to the next bigger node. */ if (elt->right) elt = elt->right; else { /* We have reached the biggest node in a subtree. Find the parent of it, which is the next bigger node. */ while (elt->parent && elt->parent->right == elt) elt = elt->parent; elt = elt->parent; if (elt && init_field_decl_cmp (constructor_unfilled_fields, elt->purpose) < 0) { next = elt->purpose; break; } } } } } /* Ordinarily return, but not if we want to output all and there are elements left. */ if (!(all && next != NULL_TREE)) return; /* If it's not incremental, just skip over the gap, so that after jumping to retry we will output the next successive element. */ if (RECORD_OR_UNION_TYPE_P (constructor_type)) constructor_unfilled_fields = next; else if (TREE_CODE (constructor_type) == ARRAY_TYPE) constructor_unfilled_index = next; /* ELT now points to the node in the pending tree with the next initializer to output. */ goto retry; } /* Add one non-braced element to the current constructor level. This adjusts the current position within the constructor's type. This may also start or terminate implicit levels to handle a partly-braced initializer. Once this has found the correct level for the new element, it calls output_init_element. IMPLICIT is true if value comes from pop_init_level (1), the new initializer has been merged with the existing one and thus no warnings should be emitted about overriding an existing initializer. */ void process_init_element (location_t loc, struct c_expr value, bool implicit, struct obstack * braced_init_obstack) { tree orig_value = value.value; int string_flag = (orig_value != NULL_TREE && TREE_CODE (orig_value) == STRING_CST); bool strict_string = value.original_code == STRING_CST; bool was_designated = designator_depth != 0; designator_depth = 0; designator_erroneous = 0; if (!implicit && value.value && !integer_zerop (value.value)) constructor_zeroinit = 0; /* Handle superfluous braces around string cst as in char x[] = {"foo"}; */ if (string_flag && constructor_type && !was_designated && TREE_CODE (constructor_type) == ARRAY_TYPE && INTEGRAL_TYPE_P (TREE_TYPE (constructor_type)) && integer_zerop (constructor_unfilled_index)) { if (constructor_stack->replacement_value.value) error_init (loc, "excess elements in char array initializer"); constructor_stack->replacement_value = value; return; } if (constructor_stack->replacement_value.value != NULL_TREE) { error_init (loc, "excess elements in struct initializer"); return; } /* Ignore elements of a brace group if it is entirely superfluous and has already been diagnosed. 
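E.g., after the "extra brace group at end of initializer" error for struct { int a; } s = { 1, { 2 } };, constructor_type is NULL_TREE within the superfluous group, so the element 2 is dropped without further diagnostics.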
*/ if (constructor_type == NULL_TREE) return; if (!implicit && warn_designated_init && !was_designated && TREE_CODE (constructor_type) == RECORD_TYPE && lookup_attribute ("designated_init", TYPE_ATTRIBUTES (constructor_type))) warning_init (loc, OPT_Wdesignated_init, "positional initialization of field " "in %<struct%> declared with %<designated_init%> attribute"); /* If we've exhausted any levels that didn't have braces, pop them now. */ while (constructor_stack->implicit) { if (RECORD_OR_UNION_TYPE_P (constructor_type) && constructor_fields == NULL_TREE) process_init_element (loc, pop_init_level (loc, 1, braced_init_obstack, last_init_list_comma), true, braced_init_obstack); else if ((TREE_CODE (constructor_type) == ARRAY_TYPE || VECTOR_TYPE_P (constructor_type)) && constructor_max_index && tree_int_cst_lt (constructor_max_index, constructor_index)) process_init_element (loc, pop_init_level (loc, 1, braced_init_obstack, last_init_list_comma), true, braced_init_obstack); else break; } /* In the case of [LO ... HI] = VALUE, only evaluate VALUE once. */ if (constructor_range_stack) { /* If value is a compound literal and we'll be just using its content, don't put it into a SAVE_EXPR. */ if (TREE_CODE (value.value) != COMPOUND_LITERAL_EXPR || !require_constant_value) { tree semantic_type = NULL_TREE; if (TREE_CODE (value.value) == EXCESS_PRECISION_EXPR) { semantic_type = TREE_TYPE (value.value); value.value = TREE_OPERAND (value.value, 0); } value.value = save_expr (value.value); if (semantic_type) value.value = build1 (EXCESS_PRECISION_EXPR, semantic_type, value.value); } } while (1) { if (TREE_CODE (constructor_type) == RECORD_TYPE) { tree fieldtype; enum tree_code fieldcode; if (constructor_fields == NULL_TREE) { pedwarn_init (loc, 0, "excess elements in struct initializer"); break; } fieldtype = TREE_TYPE (constructor_fields); if (fieldtype != error_mark_node) fieldtype = TYPE_MAIN_VARIANT (fieldtype); fieldcode = TREE_CODE (fieldtype); /* Error for non-static initialization of a flexible array member. */ if (fieldcode == ARRAY_TYPE && !require_constant_value && TYPE_SIZE (fieldtype) == NULL_TREE && DECL_CHAIN (constructor_fields) == NULL_TREE) { error_init (loc, "non-static initialization of a flexible " "array member"); break; } /* Error for initialization of a flexible array member with a string constant if the structure is in an array. E.g.: struct S { int x; char y[]; }; struct S s[] = { { 1, "foo" } }; is invalid. */ if (string_flag && fieldcode == ARRAY_TYPE && constructor_depth > 1 && TYPE_SIZE (fieldtype) == NULL_TREE && DECL_CHAIN (constructor_fields) == NULL_TREE) { bool in_array_p = false; for (struct constructor_stack *p = constructor_stack; p && p->type; p = p->next) if (TREE_CODE (p->type) == ARRAY_TYPE) { in_array_p = true; break; } if (in_array_p) { error_init (loc, "initialization of flexible array " "member in a nested context"); break; } } /* Accept a string constant to initialize a subarray. */ if (value.value != NULL_TREE && fieldcode == ARRAY_TYPE && INTEGRAL_TYPE_P (TREE_TYPE (fieldtype)) && string_flag) value.value = orig_value; /* Otherwise, if we have come to a subaggregate, and we don't have an element of its type, push into it. 
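E.g., in struct { struct { int x, y; } p; int z; } s = { 1, 2, 3 }; the value 1 does not have the inner struct type, so an implicit level is pushed and 1 and 2 initialize P.X and P.Y, leaving 3 for Z.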
*/ else if (value.value != NULL_TREE && value.value != error_mark_node && TYPE_MAIN_VARIANT (TREE_TYPE (value.value)) != fieldtype && (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE || fieldcode == UNION_TYPE || fieldcode == VECTOR_TYPE)) { push_init_level (loc, 1, braced_init_obstack); continue; } if (value.value) { push_member_name (constructor_fields); output_init_element (loc, value.value, value.original_type, strict_string, fieldtype, constructor_fields, true, implicit, braced_init_obstack); RESTORE_SPELLING_DEPTH (constructor_depth); } else /* Do the bookkeeping for an element that was directly output as a constructor. */ { /* For a record, keep track of end position of last field. */ if (DECL_SIZE (constructor_fields)) constructor_bit_index = size_binop_loc (input_location, PLUS_EXPR, bit_position (constructor_fields), DECL_SIZE (constructor_fields)); /* If the current field was the first one not yet written out, it isn't now, so update. */ if (constructor_unfilled_fields == constructor_fields) { constructor_unfilled_fields = DECL_CHAIN (constructor_fields); /* Skip any nameless bit fields. */ while (constructor_unfilled_fields != 0 && (DECL_UNNAMED_BIT_FIELD (constructor_unfilled_fields))) constructor_unfilled_fields = DECL_CHAIN (constructor_unfilled_fields); } } constructor_fields = DECL_CHAIN (constructor_fields); /* Skip any nameless bit fields at the beginning. */ while (constructor_fields != NULL_TREE && DECL_UNNAMED_BIT_FIELD (constructor_fields)) constructor_fields = DECL_CHAIN (constructor_fields); } else if (TREE_CODE (constructor_type) == UNION_TYPE) { tree fieldtype; enum tree_code fieldcode; if (constructor_fields == NULL_TREE) { pedwarn_init (loc, 0, "excess elements in union initializer"); break; } fieldtype = TREE_TYPE (constructor_fields); if (fieldtype != error_mark_node) fieldtype = TYPE_MAIN_VARIANT (fieldtype); fieldcode = TREE_CODE (fieldtype); /* Warn that traditional C rejects initialization of unions. We skip the warning if the value is zero. This is done under the assumption that the zero initializer in user code appears conditioned on e.g. __STDC__ to avoid "missing initializer" warnings and relies on default initialization to zero in the traditional C case. We also skip the warning if the initializer is designated, again on the assumption that this must be conditional on __STDC__ anyway (and we've already complained about the member-designator already). */ if (!in_system_header_at (input_location) && !constructor_designated && !(value.value && (integer_zerop (value.value) || real_zerop (value.value)))) warning (OPT_Wtraditional, "traditional C rejects initialization " "of unions"); /* Accept a string constant to initialize a subarray. */ if (value.value != NULL_TREE && fieldcode == ARRAY_TYPE && INTEGRAL_TYPE_P (TREE_TYPE (fieldtype)) && string_flag) value.value = orig_value; /* Otherwise, if we have come to a subaggregate, and we don't have an element of its type, push into it. 
*/ else if (value.value != NULL_TREE && value.value != error_mark_node && TYPE_MAIN_VARIANT (TREE_TYPE (value.value)) != fieldtype && (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE || fieldcode == UNION_TYPE || fieldcode == VECTOR_TYPE)) { push_init_level (loc, 1, braced_init_obstack); continue; } if (value.value) { push_member_name (constructor_fields); output_init_element (loc, value.value, value.original_type, strict_string, fieldtype, constructor_fields, true, implicit, braced_init_obstack); RESTORE_SPELLING_DEPTH (constructor_depth); } else /* Do the bookkeeping for an element that was directly output as a constructor. */ { constructor_bit_index = DECL_SIZE (constructor_fields); constructor_unfilled_fields = DECL_CHAIN (constructor_fields); } constructor_fields = NULL_TREE; } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type)); enum tree_code eltcode = TREE_CODE (elttype); /* Accept a string constant to initialize a subarray. */ if (value.value != NULL_TREE && eltcode == ARRAY_TYPE && INTEGRAL_TYPE_P (TREE_TYPE (elttype)) && string_flag) value.value = orig_value; /* Otherwise, if we have come to a subaggregate, and we don't have an element of its type, push into it. */ else if (value.value != NULL_TREE && value.value != error_mark_node && TYPE_MAIN_VARIANT (TREE_TYPE (value.value)) != elttype && (eltcode == RECORD_TYPE || eltcode == ARRAY_TYPE || eltcode == UNION_TYPE || eltcode == VECTOR_TYPE)) { push_init_level (loc, 1, braced_init_obstack); continue; } if (constructor_max_index != NULL_TREE && (tree_int_cst_lt (constructor_max_index, constructor_index) || integer_all_onesp (constructor_max_index))) { pedwarn_init (loc, 0, "excess elements in array initializer"); break; } /* Now output the actual element. */ if (value.value) { push_array_bounds (tree_to_uhwi (constructor_index)); output_init_element (loc, value.value, value.original_type, strict_string, elttype, constructor_index, true, implicit, braced_init_obstack); RESTORE_SPELLING_DEPTH (constructor_depth); } constructor_index = size_binop_loc (input_location, PLUS_EXPR, constructor_index, bitsize_one_node); if (!value.value) /* If we are doing the bookkeeping for an element that was directly output as a constructor, we must update constructor_unfilled_index. */ constructor_unfilled_index = constructor_index; } else if (VECTOR_TYPE_P (constructor_type)) { tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type)); /* Do a basic check of initializer size. Note that vectors always have a fixed size derived from their type. */ if (tree_int_cst_lt (constructor_max_index, constructor_index)) { pedwarn_init (loc, 0, "excess elements in vector initializer"); break; } /* Now output the actual element. */ if (value.value) { if (TREE_CODE (value.value) == VECTOR_CST) elttype = TYPE_MAIN_VARIANT (constructor_type); output_init_element (loc, value.value, value.original_type, strict_string, elttype, constructor_index, true, implicit, braced_init_obstack); } constructor_index = size_binop_loc (input_location, PLUS_EXPR, constructor_index, bitsize_one_node); if (!value.value) /* If we are doing the bookkeeping for an element that was directly output as a constructor, we must update constructor_unfilled_index. */ constructor_unfilled_index = constructor_index; } /* Handle the sole element allowed in a braced initializer for a scalar variable. 
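E.g., int i = { 5 }; is accepted, while int j = { 5, 6 }; draws the "excess elements in scalar initializer" pedwarn below.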
*/ else if (constructor_type != error_mark_node && constructor_fields == NULL_TREE) { pedwarn_init (loc, 0, "excess elements in scalar initializer"); break; } else { if (value.value) output_init_element (loc, value.value, value.original_type, strict_string, constructor_type, NULL_TREE, true, implicit, braced_init_obstack); constructor_fields = NULL_TREE; } /* Handle range initializers either at this level or anywhere higher in the designator stack. */ if (constructor_range_stack) { struct constructor_range_stack *p, *range_stack; int finish = 0; range_stack = constructor_range_stack; constructor_range_stack = 0; while (constructor_stack != range_stack->stack) { gcc_assert (constructor_stack->implicit); process_init_element (loc, pop_init_level (loc, 1, braced_init_obstack, last_init_list_comma), true, braced_init_obstack); } for (p = range_stack; !p->range_end || tree_int_cst_equal (p->index, p->range_end); p = p->prev) { gcc_assert (constructor_stack->implicit); process_init_element (loc, pop_init_level (loc, 1, braced_init_obstack, last_init_list_comma), true, braced_init_obstack); } p->index = size_binop_loc (input_location, PLUS_EXPR, p->index, bitsize_one_node); if (tree_int_cst_equal (p->index, p->range_end) && !p->prev) finish = 1; while (1) { constructor_index = p->index; constructor_fields = p->fields; if (finish && p->range_end && p->index == p->range_start) { finish = 0; p->prev = 0; } p = p->next; if (!p) break; finish_implicit_inits (loc, braced_init_obstack); push_init_level (loc, 2, braced_init_obstack); p->stack = constructor_stack; if (p->range_end && tree_int_cst_equal (p->index, p->range_end)) p->index = p->range_start; } if (!finish) constructor_range_stack = range_stack; continue; } break; } constructor_range_stack = 0; } /* Build a complete asm-statement, whose components are a CV_QUALIFIER (guaranteed to be 'volatile' or null) and ARGS (represented using an ASM_EXPR node). */ tree build_asm_stmt (bool is_volatile, tree args) { if (is_volatile) ASM_VOLATILE_P (args) = 1; return add_stmt (args); } /* Build an asm-expr, whose components are a STRING, some OUTPUTS, some INPUTS, and some CLOBBERS. The latter three may be NULL. SIMPLE indicates whether there was anything at all after the string in the asm expression -- asm("blah") and asm("blah" : ) are subtly different. We use a ASM_EXPR node to represent this. LOC is the location of the asm, and IS_INLINE says whether this is asm inline. */ tree build_asm_expr (location_t loc, tree string, tree outputs, tree inputs, tree clobbers, tree labels, bool simple, bool is_inline) { tree tail; tree args; int i; const char *constraint; const char **oconstraints; bool allows_mem, allows_reg, is_inout; int ninputs, noutputs; ninputs = list_length (inputs); noutputs = list_length (outputs); oconstraints = (const char **) alloca (noutputs * sizeof (const char *)); string = resolve_asm_operand_names (string, outputs, inputs, labels); /* Remove output conversions that change the type but not the mode. */ for (i = 0, tail = outputs; tail; ++i, tail = TREE_CHAIN (tail)) { tree output = TREE_VALUE (tail); output = c_fully_fold (output, false, NULL, true); /* ??? Really, this should not be here. Users should be using a proper lvalue, dammit. But there's a long history of using casts in the output operands. In cases like longlong.h, this becomes a primitive form of typechecking -- if the cast can be removed, then the output operand had a type of the proper width; otherwise we'll get an error. Gross, but ... 
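E.g., longlong.h-style operands such as asm ("..." : "=r" ((USItype) (x))); depend on the cast being stripped so that X itself serves as the output lvalue.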
*/ STRIP_NOPS (output); if (!lvalue_or_else (loc, output, lv_asm)) output = error_mark_node; if (output != error_mark_node && (TREE_READONLY (output) || TYPE_READONLY (TREE_TYPE (output)) || (RECORD_OR_UNION_TYPE_P (TREE_TYPE (output)) && C_TYPE_FIELDS_READONLY (TREE_TYPE (output))))) readonly_error (loc, output, lv_asm); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail))); oconstraints[i] = constraint; if (parse_output_constraint (&constraint, i, ninputs, noutputs, &allows_mem, &allows_reg, &is_inout)) { /* If the operand is going to end up in memory, mark it addressable. */ if (!allows_reg && !c_mark_addressable (output)) output = error_mark_node; if (!(!allows_reg && allows_mem) && output != error_mark_node && VOID_TYPE_P (TREE_TYPE (output))) { error_at (loc, "invalid use of void expression"); output = error_mark_node; } } else output = error_mark_node; TREE_VALUE (tail) = output; } for (i = 0, tail = inputs; tail; ++i, tail = TREE_CHAIN (tail)) { tree input; constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail))); input = TREE_VALUE (tail); if (parse_input_constraint (&constraint, i, ninputs, noutputs, 0, oconstraints, &allows_mem, &allows_reg)) { /* If the operand is going to end up in memory, mark it addressable. */ if (!allows_reg && allows_mem) { input = c_fully_fold (input, false, NULL, true); /* Strip the nops as we allow this case. FIXME, this really should be rejected or made deprecated. */ STRIP_NOPS (input); if (!c_mark_addressable (input)) input = error_mark_node; } else { struct c_expr expr; memset (&expr, 0, sizeof (expr)); expr.value = input; expr = convert_lvalue_to_rvalue (loc, expr, true, false); input = c_fully_fold (expr.value, false, NULL); if (input != error_mark_node && VOID_TYPE_P (TREE_TYPE (input))) { error_at (loc, "invalid use of void expression"); input = error_mark_node; } } } else input = error_mark_node; TREE_VALUE (tail) = input; } /* ASMs with labels cannot have outputs. This should have been enforced by the parser. */ gcc_assert (outputs == NULL || labels == NULL); args = build_stmt (loc, ASM_EXPR, string, outputs, inputs, clobbers, labels); /* asm statements without outputs, including simple ones, are treated as volatile. */ ASM_INPUT_P (args) = simple; ASM_VOLATILE_P (args) = (noutputs == 0); ASM_INLINE_P (args) = is_inline; return args; } /* Generate a goto statement to LABEL. LOC is the location of the GOTO. */ tree c_finish_goto_label (location_t loc, tree label) { tree decl = lookup_label_for_goto (loc, label); if (!decl) return NULL_TREE; TREE_USED (decl) = 1; { add_stmt (build_predict_expr (PRED_GOTO, NOT_TAKEN)); tree t = build1 (GOTO_EXPR, void_type_node, decl); SET_EXPR_LOCATION (t, loc); return add_stmt (t); } } /* Generate a computed goto statement to EXPR. LOC is the location of the GOTO. */ tree c_finish_goto_ptr (location_t loc, tree expr) { tree t; pedwarn (loc, OPT_Wpedantic, "ISO C forbids %<goto *expr;%>"); expr = c_fully_fold (expr, false, NULL); expr = convert (ptr_type_node, expr); t = build1 (GOTO_EXPR, void_type_node, expr); SET_EXPR_LOCATION (t, loc); return add_stmt (t); } /* Generate a C `return' statement. RETVAL is the expression for what to return, or a null pointer for `return;' with no value. LOC is the location of the return statement, or the location of the expression, if the statement has any. If ORIGTYPE is not NULL_TREE, it is the original type of RETVAL. 
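E.g., int *f (void) { int i; return &i; } draws the "function returns address of local variable" warning below, and the returned value is replaced with zero.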
*/ tree c_finish_return (location_t loc, tree retval, tree origtype) { tree valtype = TREE_TYPE (TREE_TYPE (current_function_decl)), ret_stmt; bool no_warning = false; bool npc = false; /* Use the expansion point to handle cases such as returning NULL in a function returning void. */ source_location xloc = expansion_point_location_if_in_system_header (loc); if (TREE_THIS_VOLATILE (current_function_decl)) warning_at (xloc, 0, "function declared %<noreturn%> has a %<return%> statement"); if (retval) { tree semantic_type = NULL_TREE; npc = null_pointer_constant_p (retval); if (TREE_CODE (retval) == EXCESS_PRECISION_EXPR) { semantic_type = TREE_TYPE (retval); retval = TREE_OPERAND (retval, 0); } retval = c_fully_fold (retval, false, NULL); if (semantic_type) retval = build1 (EXCESS_PRECISION_EXPR, semantic_type, retval); } if (!retval) { current_function_returns_null = 1; if ((warn_return_type || flag_isoc99) && valtype != NULL_TREE && TREE_CODE (valtype) != VOID_TYPE) { bool warned_here; if (flag_isoc99) warned_here = pedwarn (loc, 0, "%<return%> with no value, in function returning non-void"); else warned_here = warning_at (loc, OPT_Wreturn_type, "%<return%> with no value, in function returning non-void"); no_warning = true; if (warned_here) inform (DECL_SOURCE_LOCATION (current_function_decl), "declared here"); } } else if (valtype == NULL_TREE || TREE_CODE (valtype) == VOID_TYPE) { current_function_returns_null = 1; bool warned_here; if (TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE) warned_here = pedwarn (xloc, 0, "%<return%> with a value, in function returning void"); else warned_here = pedwarn (xloc, OPT_Wpedantic, "ISO C forbids " "%<return%> with expression, in function returning void"); if (warned_here) inform (DECL_SOURCE_LOCATION (current_function_decl), "declared here"); } else { tree t = convert_for_assignment (loc, UNKNOWN_LOCATION, valtype, retval, origtype, ic_return, npc, NULL_TREE, NULL_TREE, 0); tree res = DECL_RESULT (current_function_decl); tree inner; bool save; current_function_returns_value = 1; if (t == error_mark_node) return NULL_TREE; save = in_late_binary_op; if (TREE_CODE (TREE_TYPE (res)) == BOOLEAN_TYPE || TREE_CODE (TREE_TYPE (res)) == COMPLEX_TYPE || (TREE_CODE (TREE_TYPE (t)) == REAL_TYPE && (TREE_CODE (TREE_TYPE (res)) == INTEGER_TYPE || TREE_CODE (TREE_TYPE (res)) == ENUMERAL_TYPE) && sanitize_flags_p (SANITIZE_FLOAT_CAST))) in_late_binary_op = true; inner = t = convert (TREE_TYPE (res), t); in_late_binary_op = save; /* Strip any conversions, additions, and subtractions, and see if we are returning the address of a local variable. Warn if so. */ while (1) { switch (TREE_CODE (inner)) { CASE_CONVERT: case NON_LVALUE_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: inner = TREE_OPERAND (inner, 0); continue; case MINUS_EXPR: /* If the second operand of the MINUS_EXPR has a pointer type (or is converted from it), this may be valid, so don't give a warning. 
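A difference such as &buf[i] - p, for example, yields an offset rather than an escaping address.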
*/ { tree op1 = TREE_OPERAND (inner, 1); while (!POINTER_TYPE_P (TREE_TYPE (op1)) && (CONVERT_EXPR_P (op1) || TREE_CODE (op1) == NON_LVALUE_EXPR)) op1 = TREE_OPERAND (op1, 0); if (POINTER_TYPE_P (TREE_TYPE (op1))) break; inner = TREE_OPERAND (inner, 0); continue; } case ADDR_EXPR: inner = TREE_OPERAND (inner, 0); while (REFERENCE_CLASS_P (inner) && !INDIRECT_REF_P (inner)) inner = TREE_OPERAND (inner, 0); if (DECL_P (inner) && !DECL_EXTERNAL (inner) && !TREE_STATIC (inner) && DECL_CONTEXT (inner) == current_function_decl) { if (TREE_CODE (inner) == LABEL_DECL) warning_at (loc, OPT_Wreturn_local_addr, "function returns address of label"); else { warning_at (loc, OPT_Wreturn_local_addr, "function returns address of local variable"); tree zero = build_zero_cst (TREE_TYPE (res)); t = build2 (COMPOUND_EXPR, TREE_TYPE (res), t, zero); } } break; default: break; } break; } retval = build2 (MODIFY_EXPR, TREE_TYPE (res), res, t); SET_EXPR_LOCATION (retval, loc); if (warn_sequence_point) verify_sequence_points (retval); } ret_stmt = build_stmt (loc, RETURN_EXPR, retval); TREE_NO_WARNING (ret_stmt) |= no_warning; return add_stmt (ret_stmt); } struct c_switch { /* The SWITCH_EXPR being built. */ tree switch_expr; /* The original type of the testing expression, i.e. before the default conversion is applied. */ tree orig_type; /* A splay-tree mapping the low element of a case range to the high element, or NULL_TREE if there is no high element. Used to determine whether or not a new case label duplicates an old case label. We need a tree, rather than simply a hash table, because of the GNU case range extension. */ splay_tree cases; /* The bindings at the point of the switch. This is used for warnings crossing decls when branching to a case label. */ struct c_spot_bindings *bindings; /* The next node on the stack. */ struct c_switch *next; /* Remember whether the controlling expression had boolean type before integer promotions for the sake of -Wswitch-bool. */ bool bool_cond_p; /* Remember whether there was a case value that is outside the range of the ORIG_TYPE. */ bool outside_range_p; }; /* A stack of the currently active switch statements. The innermost switch statement is on the top of the stack. There is no need to mark the stack for garbage collection because it is only active during the processing of the body of a function, and we never collect at that point. */ struct c_switch *c_switch_stack; /* Start a C switch statement, testing expression EXP. Return the new SWITCH_EXPR. SWITCH_LOC is the location of the `switch'. SWITCH_COND_LOC is the location of the switch's condition. EXPLICIT_CAST_P is true if the expression EXP has an explicit cast. */ tree c_start_case (location_t switch_loc, location_t switch_cond_loc, tree exp, bool explicit_cast_p) { tree orig_type = error_mark_node; bool bool_cond_p = false; struct c_switch *cs; if (exp != error_mark_node) { orig_type = TREE_TYPE (exp); if (!INTEGRAL_TYPE_P (orig_type)) { if (orig_type != error_mark_node) { error_at (switch_cond_loc, "switch quantity not an integer"); orig_type = error_mark_node; } exp = integer_zero_node; } else { tree type = TYPE_MAIN_VARIANT (orig_type); tree e = exp; /* Warn if the condition has boolean value. */ while (TREE_CODE (e) == COMPOUND_EXPR) e = TREE_OPERAND (e, 1); if ((TREE_CODE (type) == BOOLEAN_TYPE || truth_value_p (TREE_CODE (e))) /* Explicit cast to int suppresses this warning. 
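Thus switch (b) on a _Bool draws -Wswitch-bool, while switch ((int) b) does not.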
*/ && !(TREE_CODE (type) == INTEGER_TYPE && explicit_cast_p)) bool_cond_p = true; if (!in_system_header_at (input_location) && (type == long_integer_type_node || type == long_unsigned_type_node)) warning_at (switch_cond_loc, OPT_Wtraditional, "%<long%> switch expression not " "converted to %<int%> in ISO C"); exp = c_fully_fold (exp, false, NULL); exp = default_conversion (exp); if (warn_sequence_point) verify_sequence_points (exp); } } /* Add this new SWITCH_EXPR to the stack. */ cs = XNEW (struct c_switch); cs->switch_expr = build2 (SWITCH_EXPR, orig_type, exp, NULL_TREE); SET_EXPR_LOCATION (cs->switch_expr, switch_loc); cs->orig_type = orig_type; cs->cases = splay_tree_new (case_compare, NULL, NULL); cs->bindings = c_get_switch_bindings (); cs->bool_cond_p = bool_cond_p; cs->outside_range_p = false; cs->next = c_switch_stack; c_switch_stack = cs; return add_stmt (cs->switch_expr); } /* Process a case label at location LOC. */ tree do_case (location_t loc, tree low_value, tree high_value) { tree label = NULL_TREE; if (low_value && TREE_CODE (low_value) != INTEGER_CST) { low_value = c_fully_fold (low_value, false, NULL); if (TREE_CODE (low_value) == INTEGER_CST) pedwarn (loc, OPT_Wpedantic, "case label is not an integer constant expression"); } if (high_value && TREE_CODE (high_value) != INTEGER_CST) { high_value = c_fully_fold (high_value, false, NULL); if (TREE_CODE (high_value) == INTEGER_CST) pedwarn (input_location, OPT_Wpedantic, "case label is not an integer constant expression"); } if (c_switch_stack == NULL) { if (low_value) error_at (loc, "case label not within a switch statement"); else error_at (loc, "%<default%> label not within a switch statement"); return NULL_TREE; } if (c_check_switch_jump_warnings (c_switch_stack->bindings, EXPR_LOCATION (c_switch_stack->switch_expr), loc)) return NULL_TREE; label = c_add_case_label (loc, c_switch_stack->cases, SWITCH_COND (c_switch_stack->switch_expr), c_switch_stack->orig_type, low_value, high_value, &c_switch_stack->outside_range_p); if (label == error_mark_node) label = NULL_TREE; return label; } /* Finish the switch statement. TYPE is the original type of the controlling expression of the switch, or NULL_TREE. */ void c_finish_case (tree body, tree type) { struct c_switch *cs = c_switch_stack; location_t switch_location; SWITCH_BODY (cs->switch_expr) = body; /* Emit warnings as needed. */ switch_location = EXPR_LOCATION (cs->switch_expr); c_do_switch_warnings (cs->cases, switch_location, type ? type : TREE_TYPE (cs->switch_expr), SWITCH_COND (cs->switch_expr), cs->bool_cond_p, cs->outside_range_p); if (c_switch_covers_all_cases_p (cs->cases, TREE_TYPE (cs->switch_expr))) SWITCH_ALL_CASES_P (cs->switch_expr) = 1; /* Pop the stack. */ c_switch_stack = cs->next; splay_tree_delete (cs->cases); c_release_switch_bindings (cs->bindings); XDELETE (cs); } /* Emit an if statement. IF_LOCUS is the location of the 'if'. COND, THEN_BLOCK and ELSE_BLOCK are expressions to be used; ELSE_BLOCK may be null. */ void c_finish_if_stmt (location_t if_locus, tree cond, tree then_block, tree else_block) { tree stmt; stmt = build3 (COND_EXPR, void_type_node, cond, then_block, else_block); SET_EXPR_LOCATION (stmt, if_locus); add_stmt (stmt); } /* Emit a general-purpose loop construct. START_LOCUS is the location of the beginning of the loop. COND is the loop condition. COND_IS_FIRST is false for DO loops. INCR is the FOR increment expression. BODY is the statement controlled by the loop. BLAB is the break label. CLAB is the continue label. 
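For a while loop the emitted shape is roughly: goto cond; top: BODY; cont: INCR; cond: if (COND) goto top; else goto brk; brk:. A do loop emits no initial jump and tests COND only at the bottom.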
Everything is allowed to be NULL. */ void c_finish_loop (location_t start_locus, tree cond, tree incr, tree body, tree blab, tree clab, bool cond_is_first) { tree entry = NULL, exit = NULL, t; /* If the condition is zero don't generate a loop construct. */ if (cond && integer_zerop (cond)) { if (cond_is_first) { t = build_and_jump (&blab); SET_EXPR_LOCATION (t, start_locus); add_stmt (t); } } else { tree top = build1 (LABEL_EXPR, void_type_node, NULL_TREE); /* If we have an exit condition, then we build an IF with gotos either out of the loop, or to the top of it. If there's no exit condition, then we just build a jump back to the top. */ exit = build_and_jump (&LABEL_EXPR_LABEL (top)); if (cond && !integer_nonzerop (cond)) { /* Canonicalize the loop condition to the end. This means generating a branch to the loop condition. Reuse the continue label, if possible. */ if (cond_is_first) { if (incr || !clab) { entry = build1 (LABEL_EXPR, void_type_node, NULL_TREE); t = build_and_jump (&LABEL_EXPR_LABEL (entry)); } else t = build1 (GOTO_EXPR, void_type_node, clab); SET_EXPR_LOCATION (t, start_locus); add_stmt (t); } t = build_and_jump (&blab); if (cond_is_first) exit = fold_build3_loc (start_locus, COND_EXPR, void_type_node, cond, exit, t); else exit = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, exit, t); } else { /* For the backward-goto's location of an unconditional loop use the beginning of the body, or, if there is none, the top of the loop. */ location_t loc = EXPR_LOCATION (expr_first (body)); if (loc == UNKNOWN_LOCATION) loc = start_locus; SET_EXPR_LOCATION (exit, loc); } add_stmt (top); } if (body) add_stmt (body); if (clab) add_stmt (build1 (LABEL_EXPR, void_type_node, clab)); if (incr) add_stmt (incr); if (entry) add_stmt (entry); if (exit) add_stmt (exit); if (blab) add_stmt (build1 (LABEL_EXPR, void_type_node, blab)); } tree c_finish_bc_stmt (location_t loc, tree *label_p, bool is_break) { bool skip; tree label = *label_p; /* In switch statements break is sometimes stylistically used after a return statement. This can lead to spurious warnings about control reaching the end of a non-void function when it is inlined. Note that we are calling block_may_fallthru with language specific tree nodes; this works because block_may_fallthru returns true when given something it does not understand. */ skip = !block_may_fallthru (cur_stmt_list); if (!label) { if (!skip) *label_p = label = create_artificial_label (loc); } else if (TREE_CODE (label) == LABEL_DECL) ; else switch (TREE_INT_CST_LOW (label)) { case 0: if (is_break) error_at (loc, "break statement not within loop or switch"); else error_at (loc, "continue statement not within a loop"); return NULL_TREE; case 1: gcc_assert (is_break); error_at (loc, "break statement used with OpenMP for loop"); return NULL_TREE; case 2: if (is_break) error ("break statement within %<#pragma simd%> loop body"); else error ("continue statement within %<#pragma simd%> loop body"); return NULL_TREE; default: gcc_unreachable (); } if (skip) return NULL_TREE; if (!is_break) add_stmt (build_predict_expr (PRED_CONTINUE, NOT_TAKEN)); return add_stmt (build1 (GOTO_EXPR, void_type_node, label)); } /* A helper routine for c_process_expr_stmt and c_finish_stmt_expr. 
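It flags statements whose value goes unused; a bare x + 1;, for example, draws the -Wunused-value warning "statement with no effect".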
*/ static void emit_side_effect_warnings (location_t loc, tree expr) { if (expr == error_mark_node) ; else if (!TREE_SIDE_EFFECTS (expr)) { if (!VOID_TYPE_P (TREE_TYPE (expr)) && !TREE_NO_WARNING (expr)) warning_at (loc, OPT_Wunused_value, "statement with no effect"); } else if (TREE_CODE (expr) == COMPOUND_EXPR) { tree r = expr; location_t cloc = loc; while (TREE_CODE (r) == COMPOUND_EXPR) { if (EXPR_HAS_LOCATION (r)) cloc = EXPR_LOCATION (r); r = TREE_OPERAND (r, 1); } if (!TREE_SIDE_EFFECTS (r) && !VOID_TYPE_P (TREE_TYPE (r)) && !CONVERT_EXPR_P (r) && !TREE_NO_WARNING (r) && !TREE_NO_WARNING (expr)) warning_at (cloc, OPT_Wunused_value, "right-hand operand of comma expression has no effect"); } else warn_if_unused_value (expr, loc); } /* Process an expression as if it were a complete statement. Emit diagnostics, but do not call ADD_STMT. LOC is the location of the statement. */ tree c_process_expr_stmt (location_t loc, tree expr) { tree exprv; if (!expr) return NULL_TREE; expr = c_fully_fold (expr, false, NULL); if (warn_sequence_point) verify_sequence_points (expr); if (TREE_TYPE (expr) != error_mark_node && !COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (expr)) && TREE_CODE (TREE_TYPE (expr)) != ARRAY_TYPE) error_at (loc, "expression statement has incomplete type"); /* If we're not processing a statement expression, warn about unused values. Warnings for statement expressions will be emitted later, once we figure out which is the result. */ if (!STATEMENT_LIST_STMT_EXPR (cur_stmt_list) && warn_unused_value) emit_side_effect_warnings (EXPR_LOC_OR_LOC (expr, loc), expr); exprv = expr; while (TREE_CODE (exprv) == COMPOUND_EXPR) exprv = TREE_OPERAND (exprv, 1); while (CONVERT_EXPR_P (exprv)) exprv = TREE_OPERAND (exprv, 0); if (DECL_P (exprv) || handled_component_p (exprv) || TREE_CODE (exprv) == ADDR_EXPR) mark_exp_read (exprv); /* If the expression is of a kind that cannot carry a location (a DECL or a constant), wrap it in a no-op NOP_EXPR so the statement's location can be attached. */ if (DECL_P (expr) || CONSTANT_CLASS_P (expr)) { expr = build1 (NOP_EXPR, TREE_TYPE (expr), expr); SET_EXPR_LOCATION (expr, loc); } return expr; } /* Emit an expression as a statement. LOC is the location of the expression. */ tree c_finish_expr_stmt (location_t loc, tree expr) { if (expr) return add_stmt (c_process_expr_stmt (loc, expr)); else return NULL; } /* Do the opposite and emit a statement as an expression. To begin, create a new binding level and return it. */ tree c_begin_stmt_expr (void) { tree ret; /* We must force a BLOCK for this level so that, if it is not expanded later, there is a way to turn off the entire subtree of blocks that are contained in it. */ keep_next_level (); ret = c_begin_compound_stmt (true); c_bindings_start_stmt_expr (c_switch_stack == NULL ? NULL : c_switch_stack->bindings); /* Mark the current statement list as belonging to a statement expression. */ STATEMENT_LIST_STMT_EXPR (ret) = 1; return ret; } /* LOC is the location of the compound statement to which this body belongs. */ tree c_finish_stmt_expr (location_t loc, tree body) { tree last, type, tmp, val; tree *last_p; body = c_end_compound_stmt (loc, body, true); c_bindings_end_stmt_expr (c_switch_stack == NULL ? NULL : c_switch_stack->bindings); /* Locate the last statement in BODY. See c_end_compound_stmt about always returning a BIND_EXPR. 
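In a statement expression such as ({ f (); g (); }), it is the final g (); whose value becomes the value of the whole expression.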
*/ last_p = &BIND_EXPR_BODY (body); last = BIND_EXPR_BODY (body); continue_searching: if (TREE_CODE (last) == STATEMENT_LIST) { tree_stmt_iterator l = tsi_last (last); while (!tsi_end_p (l) && TREE_CODE (tsi_stmt (l)) == DEBUG_BEGIN_STMT) tsi_prev (&l); /* This can happen with degenerate cases like ({ }). No value. */ if (tsi_end_p (l)) return body; /* If we're supposed to generate side effects warnings, process all of the statements except the last. */ if (warn_unused_value) { for (tree_stmt_iterator i = tsi_start (last); tsi_stmt (i) != tsi_stmt (l); tsi_next (&i)) { location_t tloc; tree t = tsi_stmt (i); tloc = EXPR_HAS_LOCATION (t) ? EXPR_LOCATION (t) : loc; emit_side_effect_warnings (tloc, t); } } last_p = tsi_stmt_ptr (l); last = *last_p; } /* If the end of the list is exception related, then the list was split by a call to push_cleanup. Continue searching. */ if (TREE_CODE (last) == TRY_FINALLY_EXPR || TREE_CODE (last) == TRY_CATCH_EXPR) { last_p = &TREE_OPERAND (last, 0); last = *last_p; goto continue_searching; } if (last == error_mark_node) return last; /* In the case that the BIND_EXPR is not necessary, return the expression out from inside it. */ if ((last == BIND_EXPR_BODY (body) /* Skip nested debug stmts. */ || last == expr_first (BIND_EXPR_BODY (body))) && BIND_EXPR_VARS (body) == NULL) { /* Even if this looks constant, do not allow it in a constant expression. */ last = c_wrap_maybe_const (last, true); /* Do not warn if the return value of a statement expression is unused. */ TREE_NO_WARNING (last) = 1; return last; } /* Extract the type of said expression. */ type = TREE_TYPE (last); /* If we're not returning a value at all, then the BIND_EXPR that we already have is a fine expression to return. */ if (!type || VOID_TYPE_P (type)) return body; /* Now that we've located the expression containing the value, it seems silly to make voidify_wrapper_expr repeat the process. Create a temporary of the appropriate type and stick it in a TARGET_EXPR. */ tmp = create_tmp_var_raw (type); /* Unwrap a no-op NOP_EXPR as added by c_finish_expr_stmt. This avoids tree_expr_nonnegative_p giving up immediately. */ val = last; if (TREE_CODE (val) == NOP_EXPR && TREE_TYPE (val) == TREE_TYPE (TREE_OPERAND (val, 0))) val = TREE_OPERAND (val, 0); *last_p = build2 (MODIFY_EXPR, void_type_node, tmp, val); SET_EXPR_LOCATION (*last_p, EXPR_LOCATION (last)); { tree t = build4 (TARGET_EXPR, type, tmp, body, NULL_TREE, NULL_TREE); SET_EXPR_LOCATION (t, loc); return t; } } /* Begin and end compound statements. This is as simple as pushing and popping new statement lists from the tree. */ tree c_begin_compound_stmt (bool do_scope) { tree stmt = push_stmt_list (); if (do_scope) push_scope (); return stmt; } /* End a compound statement. STMT is the statement. LOC is the location of the compound statement-- this is usually the location of the opening brace. */ tree c_end_compound_stmt (location_t loc, tree stmt, bool do_scope) { tree block = NULL; if (do_scope) { if (c_dialect_objc ()) objc_clear_super_receiver (); block = pop_scope (); } stmt = pop_stmt_list (stmt); stmt = c_build_bind_expr (loc, block, stmt); /* If this compound statement is nested immediately inside a statement expression, then force a BIND_EXPR to be created. Otherwise we'll do the wrong thing for ({ { 1; } }) or ({ 1; { } }). In particular, STATEMENT_LISTs merge, and thus we can lose track of what statement was really last. 
*/ if (building_stmt_list_p () && STATEMENT_LIST_STMT_EXPR (cur_stmt_list) && TREE_CODE (stmt) != BIND_EXPR) { stmt = build3 (BIND_EXPR, void_type_node, NULL, stmt, NULL); TREE_SIDE_EFFECTS (stmt) = 1; SET_EXPR_LOCATION (stmt, loc); } return stmt; } /* Queue a cleanup. CLEANUP is an expression/statement to be executed when the current scope is exited. EH_ONLY is true when this is not meant to apply to normal control flow transfer. */ void push_cleanup (tree decl, tree cleanup, bool eh_only) { enum tree_code code; tree stmt, list; bool stmt_expr; code = eh_only ? TRY_CATCH_EXPR : TRY_FINALLY_EXPR; stmt = build_stmt (DECL_SOURCE_LOCATION (decl), code, NULL, cleanup); add_stmt (stmt); stmt_expr = STATEMENT_LIST_STMT_EXPR (cur_stmt_list); list = push_stmt_list (); TREE_OPERAND (stmt, 0) = list; STATEMENT_LIST_STMT_EXPR (list) = stmt_expr; } /* Build a vector comparison of ARG0 and ARG1 using CODE opcode into a value of TYPE type. Comparison is done via VEC_COND_EXPR. */ static tree build_vec_cmp (tree_code code, tree type, tree arg0, tree arg1) { tree zero_vec = build_zero_cst (type); tree minus_one_vec = build_minus_one_cst (type); tree cmp_type = build_same_sized_truth_vector_type (type); tree cmp = build2 (code, cmp_type, arg0, arg1); return build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec); } /* Build a binary-operation expression without default conversions. CODE is the kind of expression to build. LOCATION is the operator's location. This function differs from `build' in several ways: the data type of the result is computed and recorded in it, warnings are generated if arg data types are invalid, special handling for addition and subtraction of pointers is known, and some optimization is done (operations on narrow ints are done in the narrower type when that gives the same result). Constant folding is also done before the result is returned. Note that the operands will never have enumeral types, or function or array types, because either they will have the default conversions performed or they have both just been converted to some other type in which the arithmetic is to be done. */ tree build_binary_op (location_t location, enum tree_code code, tree orig_op0, tree orig_op1, bool convert_p) { tree type0, type1, orig_type0, orig_type1; tree eptype; enum tree_code code0, code1; tree op0, op1; tree ret = error_mark_node; const char *invalid_op_diag; bool op0_int_operands, op1_int_operands; bool int_const, int_const_or_overflow, int_operands; /* Expression code to give to the expression when it is built. Normally this is CODE, which is what the caller asked for, but in some special cases we change it. */ enum tree_code resultcode = code; /* Data type in which the computation is to be performed. In the simplest cases this is the common type of the arguments. */ tree result_type = NULL; /* When the computation is in excess precision, the type of the final EXCESS_PRECISION_EXPR. */ tree semantic_result_type = NULL; /* Nonzero means operands have already been type-converted in whatever way is necessary. Zero means they need to be converted to RESULT_TYPE. */ int converted = 0; /* Nonzero means create the expression with this type, rather than RESULT_TYPE. */ tree build_type = NULL_TREE; /* Nonzero means after finally constructing the expression convert it to this type. */ tree final_type = NULL_TREE; /* Nonzero if this is an operation like MIN or MAX which can safely be computed in short if both args are promoted shorts. Also implies COMMON. 
-1 indicates a bitwise operation; this makes a difference in the exact conditions for when it is safe to do the operation in a narrower mode. */ int shorten = 0; /* Nonzero if this is a comparison operation; if both args are promoted shorts, compare the original shorts. Also implies COMMON. */ int short_compare = 0; /* Nonzero if this is a right-shift operation, which can be computed on the original short and then promoted if the operand is a promoted short. */ int short_shift = 0; /* Nonzero means set RESULT_TYPE to the common type of the args. */ int common = 0; /* True means types are compatible as far as ObjC is concerned. */ bool objc_ok; /* True means this is an arithmetic operation that may need excess precision. */ bool may_need_excess_precision; /* True means this is a boolean operation that converts both its operands to truth-values. */ bool boolean_op = false; /* Remember whether we're doing / or %. */ bool doing_div_or_mod = false; /* Remember whether we're doing << or >>. */ bool doing_shift = false; /* Tree holding instrumentation expression. */ tree instrument_expr = NULL; if (location == UNKNOWN_LOCATION) location = input_location; op0 = orig_op0; op1 = orig_op1; op0_int_operands = EXPR_INT_CONST_OPERANDS (orig_op0); if (op0_int_operands) op0 = remove_c_maybe_const_expr (op0); op1_int_operands = EXPR_INT_CONST_OPERANDS (orig_op1); if (op1_int_operands) op1 = remove_c_maybe_const_expr (op1); int_operands = (op0_int_operands && op1_int_operands); if (int_operands) { int_const_or_overflow = (TREE_CODE (orig_op0) == INTEGER_CST && TREE_CODE (orig_op1) == INTEGER_CST); int_const = (int_const_or_overflow && !TREE_OVERFLOW (orig_op0) && !TREE_OVERFLOW (orig_op1)); } else int_const = int_const_or_overflow = false; /* Do not apply default conversion in mixed vector/scalar expression. */ if (convert_p && VECTOR_TYPE_P (TREE_TYPE (op0)) == VECTOR_TYPE_P (TREE_TYPE (op1))) { op0 = default_conversion (op0); op1 = default_conversion (op1); } orig_type0 = type0 = TREE_TYPE (op0); orig_type1 = type1 = TREE_TYPE (op1); /* The expression codes of the data types of the arguments tell us whether the arguments are integers, floating, pointers, etc. */ code0 = TREE_CODE (type0); code1 = TREE_CODE (type1); /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (op0); STRIP_TYPE_NOPS (op1); /* If an error was already reported for one of the arguments, avoid reporting another error. 
*/ if (code0 == ERROR_MARK || code1 == ERROR_MARK) return error_mark_node; if (code0 == POINTER_TYPE && reject_gcc_builtin (op0, EXPR_LOCATION (orig_op0))) return error_mark_node; if (code1 == POINTER_TYPE && reject_gcc_builtin (op1, EXPR_LOCATION (orig_op1))) return error_mark_node; if ((invalid_op_diag = targetm.invalid_binary_op (code, type0, type1))) { error_at (location, invalid_op_diag); return error_mark_node; } switch (code) { case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: may_need_excess_precision = true; break; default: may_need_excess_precision = false; break; } if (TREE_CODE (op0) == EXCESS_PRECISION_EXPR) { op0 = TREE_OPERAND (op0, 0); type0 = TREE_TYPE (op0); } else if (may_need_excess_precision && (eptype = excess_precision_type (type0)) != NULL_TREE) { type0 = eptype; op0 = convert (eptype, op0); } if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR) { op1 = TREE_OPERAND (op1, 0); type1 = TREE_TYPE (op1); } else if (may_need_excess_precision && (eptype = excess_precision_type (type1)) != NULL_TREE) { type1 = eptype; op1 = convert (eptype, op1); } objc_ok = objc_compare_types (type0, type1, -3, NULL_TREE); /* In case when one of the operands of the binary operation is a vector and another is a scalar -- convert scalar to vector. */ if ((code0 == VECTOR_TYPE) != (code1 == VECTOR_TYPE)) { enum stv_conv convert_flag = scalar_to_vector (location, code, op0, op1, true); switch (convert_flag) { case stv_error: return error_mark_node; case stv_firstarg: { bool maybe_const = true; tree sc; sc = c_fully_fold (op0, false, &maybe_const); sc = save_expr (sc); sc = convert (TREE_TYPE (type1), sc); op0 = build_vector_from_val (type1, sc); if (!maybe_const) op0 = c_wrap_maybe_const (op0, true); orig_type0 = type0 = TREE_TYPE (op0); code0 = TREE_CODE (type0); converted = 1; break; } case stv_secondarg: { bool maybe_const = true; tree sc; sc = c_fully_fold (op1, false, &maybe_const); sc = save_expr (sc); sc = convert (TREE_TYPE (type0), sc); op1 = build_vector_from_val (type0, sc); if (!maybe_const) op1 = c_wrap_maybe_const (op1, true); orig_type1 = type1 = TREE_TYPE (op1); code1 = TREE_CODE (type1); converted = 1; break; } default: break; } } switch (code) { case PLUS_EXPR: /* Handle the pointer + int case. */ if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE) { ret = pointer_int_sum (location, PLUS_EXPR, op0, op1); goto return_build_binary_op; } else if (code1 == POINTER_TYPE && code0 == INTEGER_TYPE) { ret = pointer_int_sum (location, PLUS_EXPR, op1, op0); goto return_build_binary_op; } else common = 1; break; case MINUS_EXPR: /* Subtraction of two similar pointers. We must subtract them as integers, then divide by object size. */ if (code0 == POINTER_TYPE && code1 == POINTER_TYPE && comp_target_types (location, type0, type1)) { ret = pointer_diff (location, op0, op1, &instrument_expr); goto return_build_binary_op; } /* Handle pointer minus int. Just like pointer plus int. 
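That is, p - 3 goes through pointer_int_sum just as p + 3 does, with the MINUS_EXPR code causing the offset to be negated.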
*/ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE) { ret = pointer_int_sum (location, MINUS_EXPR, op0, op1); goto return_build_binary_op; } else common = 1; break; case MULT_EXPR: common = 1; break; case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: doing_div_or_mod = true; warn_for_div_by_zero (location, op1); if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == FIXED_POINT_TYPE || code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == FIXED_POINT_TYPE || code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE)) { enum tree_code tcode0 = code0, tcode1 = code1; if (code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE) tcode0 = TREE_CODE (TREE_TYPE (TREE_TYPE (op0))); if (code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE) tcode1 = TREE_CODE (TREE_TYPE (TREE_TYPE (op1))); if (!((tcode0 == INTEGER_TYPE && tcode1 == INTEGER_TYPE) || (tcode0 == FIXED_POINT_TYPE && tcode1 == FIXED_POINT_TYPE))) resultcode = RDIV_EXPR; else /* Although it would be tempting to shorten always here, that loses on some targets, since the modulo instruction is undefined if the quotient can't be represented in the computation mode. We shorten only if unsigned or if dividing by something we know != -1. */ shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0)) || (TREE_CODE (op1) == INTEGER_CST && !integer_all_onesp (op1))); common = 1; } break; case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE) shorten = -1; /* Allow vector types which are not floating point types. */ else if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE && !VECTOR_FLOAT_TYPE_P (type0) && !VECTOR_FLOAT_TYPE_P (type1)) common = 1; break; case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: doing_div_or_mod = true; warn_for_div_by_zero (location, op1); if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE) common = 1; else if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE) { /* Although it would be tempting to shorten always here, that loses on some targets, since the modulo instruction is undefined if the quotient can't be represented in the computation mode. We shorten only if unsigned or if dividing by something we know != -1. */ shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0)) || (TREE_CODE (op1) == INTEGER_CST && !integer_all_onesp (op1))); common = 1; } break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: if ((code0 == INTEGER_TYPE || code0 == POINTER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE || code0 == FIXED_POINT_TYPE) && (code1 == INTEGER_TYPE || code1 == POINTER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE || code1 == FIXED_POINT_TYPE)) { /* Result of these operations is always an int, but that does not mean the operands should be converted to ints! 
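Each operand is reduced to a truth value instead, so p && q on pointers tests each pointer against null rather than truncating it to int.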
*/ result_type = integer_type_node; if (op0_int_operands) { op0 = c_objc_common_truthvalue_conversion (location, orig_op0); op0 = remove_c_maybe_const_expr (op0); } else op0 = c_objc_common_truthvalue_conversion (location, op0); if (op1_int_operands) { op1 = c_objc_common_truthvalue_conversion (location, orig_op1); op1 = remove_c_maybe_const_expr (op1); } else op1 = c_objc_common_truthvalue_conversion (location, op1); converted = 1; boolean_op = true; } if (code == TRUTH_ANDIF_EXPR) { int_const_or_overflow = (int_operands && TREE_CODE (orig_op0) == INTEGER_CST && (op0 == truthvalue_false_node || TREE_CODE (orig_op1) == INTEGER_CST)); int_const = (int_const_or_overflow && !TREE_OVERFLOW (orig_op0) && (op0 == truthvalue_false_node || !TREE_OVERFLOW (orig_op1))); } else if (code == TRUTH_ORIF_EXPR) { int_const_or_overflow = (int_operands && TREE_CODE (orig_op0) == INTEGER_CST && (op0 == truthvalue_true_node || TREE_CODE (orig_op1) == INTEGER_CST)); int_const = (int_const_or_overflow && !TREE_OVERFLOW (orig_op0) && (op0 == truthvalue_true_node || !TREE_OVERFLOW (orig_op1))); } break; /* Shift operations: result has same type as first operand; always convert second operand to int. Also set SHORT_SHIFT if shifting rightward. */ case RSHIFT_EXPR: if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE && known_eq (TYPE_VECTOR_SUBPARTS (type0), TYPE_VECTOR_SUBPARTS (type1))) { result_type = type0; converted = 1; } else if ((code0 == INTEGER_TYPE || code0 == FIXED_POINT_TYPE || (code0 == VECTOR_TYPE && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE)) && code1 == INTEGER_TYPE) { doing_shift = true; if (TREE_CODE (op1) == INTEGER_CST) { if (tree_int_cst_sgn (op1) < 0) { int_const = false; if (c_inhibit_evaluation_warnings == 0) warning_at (location, OPT_Wshift_count_negative, "right shift count is negative"); } else if (code0 == VECTOR_TYPE) { if (compare_tree_int (op1, TYPE_PRECISION (TREE_TYPE (type0))) >= 0) { int_const = false; if (c_inhibit_evaluation_warnings == 0) warning_at (location, OPT_Wshift_count_overflow, "right shift count >= width of vector element"); } } else { if (!integer_zerop (op1)) short_shift = 1; if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0) { int_const = false; if (c_inhibit_evaluation_warnings == 0) warning_at (location, OPT_Wshift_count_overflow, "right shift count >= width of type"); } } } /* Use the type of the value to be shifted. */ result_type = type0; /* Avoid converting op1 to result_type later. */ converted = 1; } break; case LSHIFT_EXPR: if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE && known_eq (TYPE_VECTOR_SUBPARTS (type0), TYPE_VECTOR_SUBPARTS (type1))) { result_type = type0; converted = 1; } else if ((code0 == INTEGER_TYPE || code0 == FIXED_POINT_TYPE || (code0 == VECTOR_TYPE && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE)) && code1 == INTEGER_TYPE) { doing_shift = true; if (TREE_CODE (op0) == INTEGER_CST && tree_int_cst_sgn (op0) < 0) { /* Don't reject a left shift of a negative value in a context where a constant expression is needed in C90. 
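E.g. -1 << 2 remains a valid C90 integer constant expression; only for C99 and later, where such a shift is undefined, is INT_CONST cleared below.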
*/ if (flag_isoc99) int_const = false; if (c_inhibit_evaluation_warnings == 0) warning_at (location, OPT_Wshift_negative_value, "left shift of negative value"); } if (TREE_CODE (op1) == INTEGER_CST) { if (tree_int_cst_sgn (op1) < 0) { int_const = false; if (c_inhibit_evaluation_warnings == 0) warning_at (location, OPT_Wshift_count_negative, "left shift count is negative"); } else if (code0 == VECTOR_TYPE) { if (compare_tree_int (op1, TYPE_PRECISION (TREE_TYPE (type0))) >= 0) { int_const = false; if (c_inhibit_evaluation_warnings == 0) warning_at (location, OPT_Wshift_count_overflow, "left shift count >= width of vector element"); } } else if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0) { int_const = false; if (c_inhibit_evaluation_warnings == 0) warning_at (location, OPT_Wshift_count_overflow, "left shift count >= width of type"); } else if (TREE_CODE (op0) == INTEGER_CST && maybe_warn_shift_overflow (location, op0, op1) && flag_isoc99) int_const = false; } /* Use the type of the value to be shifted. */ result_type = type0; /* Avoid converting op1 to result_type later. */ converted = 1; } break; case EQ_EXPR: case NE_EXPR: if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE) { tree intt; if (!vector_types_compatible_elements_p (type0, type1)) { error_at (location, "comparing vectors with different " "element types"); return error_mark_node; } if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0), TYPE_VECTOR_SUBPARTS (type1))) { error_at (location, "comparing vectors with different " "number of elements"); return error_mark_node; } /* It's not precisely specified how the usual arithmetic conversions apply to the vector types. Here, we use the unsigned type if one of the operands is signed and the other one is unsigned. */ if (TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)) { if (!TYPE_UNSIGNED (type0)) op0 = build1 (VIEW_CONVERT_EXPR, type1, op0); else op1 = build1 (VIEW_CONVERT_EXPR, type0, op1); warning_at (location, OPT_Wsign_compare, "comparison between " "types %qT and %qT", type0, type1); } /* Always construct signed integer vector type. */ intt = c_common_type_for_size (GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (type0))), 0); if (!intt) { error_at (location, "could not find an integer type " "of the same size as %qT", TREE_TYPE (type0)); return error_mark_node; } result_type = build_opaque_vector_type (intt, TYPE_VECTOR_SUBPARTS (type0)); converted = 1; ret = build_vec_cmp (resultcode, result_type, op0, op1); goto return_build_binary_op; } if (FLOAT_TYPE_P (type0) || FLOAT_TYPE_P (type1)) warning_at (location, OPT_Wfloat_equal, "comparing floating point with == or != is unsafe"); /* Result of comparison is always int, but don't convert the args to int! 
*/ build_type = integer_type_node; if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == FIXED_POINT_TYPE || code0 == COMPLEX_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == FIXED_POINT_TYPE || code1 == COMPLEX_TYPE)) short_compare = 1; else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1)) { if (TREE_CODE (op0) == ADDR_EXPR && decl_with_nonnull_addr_p (TREE_OPERAND (op0, 0)) && !from_macro_expansion_at (location)) { if (code == EQ_EXPR) warning_at (location, OPT_Waddress, "the comparison will always evaluate as %<false%> " "for the address of %qD will never be NULL", TREE_OPERAND (op0, 0)); else warning_at (location, OPT_Waddress, "the comparison will always evaluate as %<true%> " "for the address of %qD will never be NULL", TREE_OPERAND (op0, 0)); } result_type = type0; } else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0)) { if (TREE_CODE (op1) == ADDR_EXPR && decl_with_nonnull_addr_p (TREE_OPERAND (op1, 0)) && !from_macro_expansion_at (location)) { if (code == EQ_EXPR) warning_at (location, OPT_Waddress, "the comparison will always evaluate as %<false%> " "for the address of %qD will never be NULL", TREE_OPERAND (op1, 0)); else warning_at (location, OPT_Waddress, "the comparison will always evaluate as %<true%> " "for the address of %qD will never be NULL", TREE_OPERAND (op1, 0)); } result_type = type1; } else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE) { tree tt0 = TREE_TYPE (type0); tree tt1 = TREE_TYPE (type1); addr_space_t as0 = TYPE_ADDR_SPACE (tt0); addr_space_t as1 = TYPE_ADDR_SPACE (tt1); addr_space_t as_common = ADDR_SPACE_GENERIC; /* Anything compares with void *. void * compares with anything. Otherwise, the targets must be compatible and both must be object or both incomplete. */ if (comp_target_types (location, type0, type1)) result_type = common_pointer_type (type0, type1); else if (!addr_space_superset (as0, as1, &as_common)) { error_at (location, "comparison of pointers to " "disjoint address spaces"); return error_mark_node; } else if (VOID_TYPE_P (tt0) && !TYPE_ATOMIC (tt0)) { if (pedantic && TREE_CODE (tt1) == FUNCTION_TYPE) pedwarn (location, OPT_Wpedantic, "ISO C forbids " "comparison of %<void *%> with function pointer"); } else if (VOID_TYPE_P (tt1) && !TYPE_ATOMIC (tt1)) { if (pedantic && TREE_CODE (tt0) == FUNCTION_TYPE) pedwarn (location, OPT_Wpedantic, "ISO C forbids " "comparison of %<void *%> with function pointer"); } else /* Avoid warning about the volatile ObjC EH puts on decls. 
*/ if (!objc_ok) pedwarn (location, 0, "comparison of distinct pointer types lacks a cast"); if (result_type == NULL_TREE) { int qual = ENCODE_QUAL_ADDR_SPACE (as_common); result_type = build_pointer_type (build_qualified_type (void_type_node, qual)); } } else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE) { result_type = type0; pedwarn (location, 0, "comparison between pointer and integer"); } else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE) { result_type = type1; pedwarn (location, 0, "comparison between pointer and integer"); } if ((TREE_CODE (TREE_TYPE (orig_op0)) == BOOLEAN_TYPE || truth_value_p (TREE_CODE (orig_op0))) ^ (TREE_CODE (TREE_TYPE (orig_op1)) == BOOLEAN_TYPE || truth_value_p (TREE_CODE (orig_op1)))) maybe_warn_bool_compare (location, code, orig_op0, orig_op1); break; case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE) { tree intt; if (!vector_types_compatible_elements_p (type0, type1)) { error_at (location, "comparing vectors with different " "element types"); return error_mark_node; } if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0), TYPE_VECTOR_SUBPARTS (type1))) { error_at (location, "comparing vectors with different " "number of elements"); return error_mark_node; } /* It's not precisely specified how the usual arithmetic conversions apply to the vector types. Here, we use the unsigned type if one of the operands is signed and the other one is unsigned. */ if (TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)) { if (!TYPE_UNSIGNED (type0)) op0 = build1 (VIEW_CONVERT_EXPR, type1, op0); else op1 = build1 (VIEW_CONVERT_EXPR, type0, op1); warning_at (location, OPT_Wsign_compare, "comparison between " "types %qT and %qT", type0, type1); } /* Always construct signed integer vector type. 
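Comparing two vectors of floats, say, yields a same-width vector of signed integers holding 0 or -1 per element, as built by build_vec_cmp.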
*/ intt = c_common_type_for_size (GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (type0))), 0); if (!intt) { error_at (location, "could not find an integer type " "of the same size as %qT", TREE_TYPE (type0)); return error_mark_node; } result_type = build_opaque_vector_type (intt, TYPE_VECTOR_SUBPARTS (type0)); converted = 1; ret = build_vec_cmp (resultcode, result_type, op0, op1); goto return_build_binary_op; } build_type = integer_type_node; if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == FIXED_POINT_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == FIXED_POINT_TYPE)) short_compare = 1; else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE) { addr_space_t as0 = TYPE_ADDR_SPACE (TREE_TYPE (type0)); addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (type1)); addr_space_t as_common; if (comp_target_types (location, type0, type1)) { result_type = common_pointer_type (type0, type1); if (!COMPLETE_TYPE_P (TREE_TYPE (type0)) != !COMPLETE_TYPE_P (TREE_TYPE (type1))) pedwarn (location, 0, "comparison of complete and incomplete pointers"); else if (TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE) pedwarn (location, OPT_Wpedantic, "ISO C forbids " "ordered comparisons of pointers to functions"); else if (null_pointer_constant_p (orig_op0) || null_pointer_constant_p (orig_op1)) warning_at (location, OPT_Wextra, "ordered comparison of pointer with null pointer"); } else if (!addr_space_superset (as0, as1, &as_common)) { error_at (location, "comparison of pointers to " "disjoint address spaces"); return error_mark_node; } else { int qual = ENCODE_QUAL_ADDR_SPACE (as_common); result_type = build_pointer_type (build_qualified_type (void_type_node, qual)); pedwarn (location, 0, "comparison of distinct pointer types lacks a cast"); } } else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1)) { result_type = type0; if (pedantic) pedwarn (location, OPT_Wpedantic, "ordered comparison of pointer with integer zero"); else if (extra_warnings) warning_at (location, OPT_Wextra, "ordered comparison of pointer with integer zero"); } else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0)) { result_type = type1; if (pedantic) pedwarn (location, OPT_Wpedantic, "ordered comparison of pointer with integer zero"); else if (extra_warnings) warning_at (location, OPT_Wextra, "ordered comparison of pointer with integer zero"); } else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE) { result_type = type0; pedwarn (location, 0, "comparison between pointer and integer"); } else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE) { result_type = type1; pedwarn (location, 0, "comparison between pointer and integer"); } if ((code0 == POINTER_TYPE || code1 == POINTER_TYPE) && sanitize_flags_p (SANITIZE_POINTER_COMPARE)) { op0 = save_expr (op0); op1 = save_expr (op1); tree tt = builtin_decl_explicit (BUILT_IN_ASAN_POINTER_COMPARE); instrument_expr = build_call_expr_loc (location, tt, 2, op0, op1); } if ((TREE_CODE (TREE_TYPE (orig_op0)) == BOOLEAN_TYPE || truth_value_p (TREE_CODE (orig_op0))) ^ (TREE_CODE (TREE_TYPE (orig_op1)) == BOOLEAN_TYPE || truth_value_p (TREE_CODE (orig_op1)))) maybe_warn_bool_compare (location, code, orig_op0, orig_op1); break; default: gcc_unreachable (); } if (code0 == ERROR_MARK || code1 == ERROR_MARK) return error_mark_node; if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE && (!tree_int_cst_equal (TYPE_SIZE (type0), TYPE_SIZE (type1)) || !vector_types_compatible_elements_p (type0, type1))) { gcc_rich_location richloc (location); 
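/* Underline both mismatched operands in the diagnostic, not just the operator. */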
richloc.maybe_add_expr (orig_op0); richloc.maybe_add_expr (orig_op1); binary_op_error (&richloc, code, type0, type1); return error_mark_node; } if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE || code0 == FIXED_POINT_TYPE || code0 == VECTOR_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE || code1 == FIXED_POINT_TYPE || code1 == VECTOR_TYPE)) { bool first_complex = (code0 == COMPLEX_TYPE); bool second_complex = (code1 == COMPLEX_TYPE); int none_complex = (!first_complex && !second_complex); if (shorten || common || short_compare) { result_type = c_common_type (type0, type1); do_warn_double_promotion (result_type, type0, type1, "implicit conversion from %qT to %qT " "to match other operand of binary " "expression", location); if (result_type == error_mark_node) return error_mark_node; } if (first_complex != second_complex && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR || (code == TRUNC_DIV_EXPR && first_complex)) && TREE_CODE (TREE_TYPE (result_type)) == REAL_TYPE && flag_signed_zeros) { /* An operation on mixed real/complex operands must be handled specially, but the language-independent code can more easily optimize the plain complex arithmetic if -fno-signed-zeros. */ tree real_type = TREE_TYPE (result_type); tree real, imag; if (type0 != orig_type0 || type1 != orig_type1) { gcc_assert (may_need_excess_precision && common); semantic_result_type = c_common_type (orig_type0, orig_type1); } if (first_complex) { if (TREE_TYPE (op0) != result_type) op0 = convert_and_check (location, result_type, op0); if (TREE_TYPE (op1) != real_type) op1 = convert_and_check (location, real_type, op1); } else { if (TREE_TYPE (op0) != real_type) op0 = convert_and_check (location, real_type, op0); if (TREE_TYPE (op1) != result_type) op1 = convert_and_check (location, result_type, op1); } if (TREE_CODE (op0) == ERROR_MARK || TREE_CODE (op1) == ERROR_MARK) return error_mark_node; if (first_complex) { op0 = save_expr (op0); real = build_unary_op (EXPR_LOCATION (orig_op0), REALPART_EXPR, op0, true); imag = build_unary_op (EXPR_LOCATION (orig_op0), IMAGPART_EXPR, op0, true); switch (code) { case MULT_EXPR: case TRUNC_DIV_EXPR: op1 = save_expr (op1); imag = build2 (resultcode, real_type, imag, op1); /* Fall through. */ case PLUS_EXPR: case MINUS_EXPR: real = build2 (resultcode, real_type, real, op1); break; default: gcc_unreachable(); } } else { op1 = save_expr (op1); real = build_unary_op (EXPR_LOCATION (orig_op1), REALPART_EXPR, op1, true); imag = build_unary_op (EXPR_LOCATION (orig_op1), IMAGPART_EXPR, op1, true); switch (code) { case MULT_EXPR: op0 = save_expr (op0); imag = build2 (resultcode, real_type, op0, imag); /* Fall through. */ case PLUS_EXPR: real = build2 (resultcode, real_type, op0, real); break; case MINUS_EXPR: real = build2 (resultcode, real_type, op0, real); imag = build1 (NEGATE_EXPR, real_type, imag); break; default: gcc_unreachable(); } } ret = build2 (COMPLEX_EXPR, result_type, real, imag); goto return_build_binary_op; } /* For certain operations (which identify themselves by shorten != 0) if both args were extended from the same smaller type, do the arithmetic in that type and then extend. shorten !=0 and !=1 indicates a bitwise operation. For them, this optimization is safe only if both args are zero-extended or both are sign-extended. Otherwise, we might change the result. Eg, (short)-1 | (unsigned short)-1 is (int)-1 but calculated in (unsigned short) it would be (unsigned short)-1. 
*/ if (shorten && none_complex) { final_type = result_type; result_type = shorten_binary_op (result_type, op0, op1, shorten == -1); } /* Shifts can be shortened if shifting right. */ if (short_shift) { int unsigned_arg; tree arg0 = get_narrower (op0, &unsigned_arg); final_type = result_type; if (arg0 == op0 && final_type == TREE_TYPE (op0)) unsigned_arg = TYPE_UNSIGNED (TREE_TYPE (op0)); if (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type) && tree_int_cst_sgn (op1) > 0 /* We can shorten only if the shift count is less than the number of bits in the smaller type size. */ && compare_tree_int (op1, TYPE_PRECISION (TREE_TYPE (arg0))) < 0 /* We cannot drop an unsigned shift after sign-extension. */ && (!TYPE_UNSIGNED (final_type) || unsigned_arg)) { /* Do an unsigned shift if the operand was zero-extended. */ result_type = c_common_signed_or_unsigned_type (unsigned_arg, TREE_TYPE (arg0)); /* Convert value-to-be-shifted to that type. */ if (TREE_TYPE (op0) != result_type) op0 = convert (result_type, op0); converted = 1; } } /* Comparison operations are shortened too but differently. They identify themselves by setting short_compare = 1. */ if (short_compare) { /* Don't write &op0, etc., because that would prevent op0 from being kept in a register. Instead, make copies of the our local variables and pass the copies by reference, then copy them back afterward. */ tree xop0 = op0, xop1 = op1, xresult_type = result_type; enum tree_code xresultcode = resultcode; tree val = shorten_compare (location, &xop0, &xop1, &xresult_type, &xresultcode); if (val != NULL_TREE) { ret = val; goto return_build_binary_op; } op0 = xop0, op1 = xop1; converted = 1; resultcode = xresultcode; if (c_inhibit_evaluation_warnings == 0) { bool op0_maybe_const = true; bool op1_maybe_const = true; tree orig_op0_folded, orig_op1_folded; if (in_late_binary_op) { orig_op0_folded = orig_op0; orig_op1_folded = orig_op1; } else { /* Fold for the sake of possible warnings, as in build_conditional_expr. This requires the "original" values to be folded, not just op0 and op1. */ c_inhibit_evaluation_warnings++; op0 = c_fully_fold (op0, require_constant_value, &op0_maybe_const); op1 = c_fully_fold (op1, require_constant_value, &op1_maybe_const); c_inhibit_evaluation_warnings--; orig_op0_folded = c_fully_fold (orig_op0, require_constant_value, NULL); orig_op1_folded = c_fully_fold (orig_op1, require_constant_value, NULL); } if (warn_sign_compare) warn_for_sign_compare (location, orig_op0_folded, orig_op1_folded, op0, op1, result_type, resultcode); if (!in_late_binary_op && !int_operands) { if (!op0_maybe_const || TREE_CODE (op0) != INTEGER_CST) op0 = c_wrap_maybe_const (op0, !op0_maybe_const); if (!op1_maybe_const || TREE_CODE (op1) != INTEGER_CST) op1 = c_wrap_maybe_const (op1, !op1_maybe_const); } } } } /* At this point, RESULT_TYPE must be nonzero to avoid an error message. If CONVERTED is zero, both args will be converted to type RESULT_TYPE. Then the expression will be built. It will be given type FINAL_TYPE if that is nonzero; otherwise, it will be given type RESULT_TYPE. 
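FINAL_TYPE is set only by the shortening paths above: the operation is performed in the narrower RESULT_TYPE and the value is widened back to FINAL_TYPE at the very end.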
*/ if (!result_type) { gcc_rich_location richloc (location); richloc.maybe_add_expr (orig_op0); richloc.maybe_add_expr (orig_op1); binary_op_error (&richloc, code, TREE_TYPE (op0), TREE_TYPE (op1)); return error_mark_node; } if (build_type == NULL_TREE) { build_type = result_type; if ((type0 != orig_type0 || type1 != orig_type1) && !boolean_op) { gcc_assert (may_need_excess_precision && common); semantic_result_type = c_common_type (orig_type0, orig_type1); } } if (!converted) { op0 = ep_convert_and_check (location, result_type, op0, semantic_result_type); op1 = ep_convert_and_check (location, result_type, op1, semantic_result_type); /* This can happen if one operand has a vector type, and the other has a different type. */ if (TREE_CODE (op0) == ERROR_MARK || TREE_CODE (op1) == ERROR_MARK) return error_mark_node; } if (sanitize_flags_p ((SANITIZE_SHIFT | SANITIZE_DIVIDE | SANITIZE_FLOAT_DIVIDE)) && current_function_decl != NULL_TREE && (doing_div_or_mod || doing_shift) && !require_constant_value) { /* OP0 and/or OP1 might have side-effects. */ op0 = save_expr (op0); op1 = save_expr (op1); op0 = c_fully_fold (op0, false, NULL); op1 = c_fully_fold (op1, false, NULL); if (doing_div_or_mod && (sanitize_flags_p ((SANITIZE_DIVIDE | SANITIZE_FLOAT_DIVIDE)))) instrument_expr = ubsan_instrument_division (location, op0, op1); else if (doing_shift && sanitize_flags_p (SANITIZE_SHIFT)) instrument_expr = ubsan_instrument_shift (location, code, op0, op1); } /* Treat expressions in initializers specially as they can't trap. */ if (int_const_or_overflow) ret = (require_constant_value ? fold_build2_initializer_loc (location, resultcode, build_type, op0, op1) : fold_build2_loc (location, resultcode, build_type, op0, op1)); else ret = build2 (resultcode, build_type, op0, op1); if (final_type != NULL_TREE) ret = convert (final_type, ret); return_build_binary_op: gcc_assert (ret != error_mark_node); if (TREE_CODE (ret) == INTEGER_CST && !TREE_OVERFLOW (ret) && !int_const) ret = (int_operands ? note_integer_operands (ret) : build1 (NOP_EXPR, TREE_TYPE (ret), ret)); else if (TREE_CODE (ret) != INTEGER_CST && int_operands && !in_late_binary_op) ret = note_integer_operands (ret); protected_set_expr_location (ret, location); if (instrument_expr != NULL) ret = fold_build2 (COMPOUND_EXPR, TREE_TYPE (ret), instrument_expr, ret); if (semantic_result_type) ret = build1_loc (location, EXCESS_PRECISION_EXPR, semantic_result_type, ret); return ret; } /* Convert EXPR to be a truth-value, validating its type for this purpose. LOCATION is the source location for the expression. 
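This is the conversion applied, e.g., to the controlling expressions of if and while and to the operands of &&, || and !.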
*/ tree c_objc_common_truthvalue_conversion (location_t location, tree expr) { bool int_const, int_operands; switch (TREE_CODE (TREE_TYPE (expr))) { case ARRAY_TYPE: error_at (location, "used array that cannot be converted to pointer where scalar is required"); return error_mark_node; case RECORD_TYPE: error_at (location, "used struct type value where scalar is required"); return error_mark_node; case UNION_TYPE: error_at (location, "used union type value where scalar is required"); return error_mark_node; case VOID_TYPE: error_at (location, "void value not ignored as it ought to be"); return error_mark_node; case POINTER_TYPE: if (reject_gcc_builtin (expr)) return error_mark_node; break; case FUNCTION_TYPE: gcc_unreachable (); case VECTOR_TYPE: error_at (location, "used vector type where scalar is required"); return error_mark_node; default: break; } int_const = (TREE_CODE (expr) == INTEGER_CST && !TREE_OVERFLOW (expr)); int_operands = EXPR_INT_CONST_OPERANDS (expr); if (int_operands && TREE_CODE (expr) != INTEGER_CST) { expr = remove_c_maybe_const_expr (expr); expr = build2 (NE_EXPR, integer_type_node, expr, convert (TREE_TYPE (expr), integer_zero_node)); expr = note_integer_operands (expr); } else /* ??? Should we also give an error for vectors rather than leaving those to give errors later? */ expr = c_common_truthvalue_conversion (location, expr); if (TREE_CODE (expr) == INTEGER_CST && int_operands && !int_const) { if (TREE_OVERFLOW (expr)) return expr; else return note_integer_operands (expr); } if (TREE_CODE (expr) == INTEGER_CST && !int_const) return build1 (NOP_EXPR, TREE_TYPE (expr), expr); return expr; } /* Convert EXPR to a contained DECL, updating *TC, *TI and *SE as required. */ tree c_expr_to_decl (tree expr, bool *tc ATTRIBUTE_UNUSED, bool *se) { if (TREE_CODE (expr) == COMPOUND_LITERAL_EXPR) { tree decl = COMPOUND_LITERAL_EXPR_DECL (expr); /* Executing a compound literal inside a function reinitializes it. */ if (!TREE_STATIC (decl)) *se = true; return decl; } else return expr; } /* Generate OMP construct CODE, with BODY and CLAUSES as its compound statement. LOC is the location of the construct. */ tree c_finish_omp_construct (location_t loc, enum tree_code code, tree body, tree clauses) { body = c_end_compound_stmt (loc, body, true); tree stmt = make_node (code); TREE_TYPE (stmt) = void_type_node; OMP_BODY (stmt) = body; OMP_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, loc); return add_stmt (stmt); } /* Generate OACC_DATA, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_DATA. */ tree c_finish_oacc_data (location_t loc, tree clauses, tree block) { tree stmt; block = c_end_compound_stmt (loc, block, true); stmt = make_node (OACC_DATA); TREE_TYPE (stmt) = void_type_node; OACC_DATA_CLAUSES (stmt) = clauses; OACC_DATA_BODY (stmt) = block; SET_EXPR_LOCATION (stmt, loc); return add_stmt (stmt); } /* Generate OACC_HOST_DATA, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_HOST_DATA. */ tree c_finish_oacc_host_data (location_t loc, tree clauses, tree block) { tree stmt; block = c_end_compound_stmt (loc, block, true); stmt = make_node (OACC_HOST_DATA); TREE_TYPE (stmt) = void_type_node; OACC_HOST_DATA_CLAUSES (stmt) = clauses; OACC_HOST_DATA_BODY (stmt) = block; SET_EXPR_LOCATION (stmt, loc); return add_stmt (stmt); } /* Like c_begin_compound_stmt, except force the retention of the BLOCK. 
*/ tree c_begin_omp_parallel (void) { tree block; keep_next_level (); block = c_begin_compound_stmt (true); return block; } /* Generate OMP_PARALLEL, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OMP_PARALLEL. */ tree c_finish_omp_parallel (location_t loc, tree clauses, tree block) { tree stmt; block = c_end_compound_stmt (loc, block, true); stmt = make_node (OMP_PARALLEL); TREE_TYPE (stmt) = void_type_node; OMP_PARALLEL_CLAUSES (stmt) = clauses; OMP_PARALLEL_BODY (stmt) = block; SET_EXPR_LOCATION (stmt, loc); return add_stmt (stmt); } /* Like c_begin_compound_stmt, except force the retention of the BLOCK. */ tree c_begin_omp_task (void) { tree block; keep_next_level (); block = c_begin_compound_stmt (true); return block; } /* Generate OMP_TASK, with CLAUSES and BLOCK as its compound statement. LOC is the location of the #pragma. */ tree c_finish_omp_task (location_t loc, tree clauses, tree block) { tree stmt; block = c_end_compound_stmt (loc, block, true); stmt = make_node (OMP_TASK); TREE_TYPE (stmt) = void_type_node; OMP_TASK_CLAUSES (stmt) = clauses; OMP_TASK_BODY (stmt) = block; SET_EXPR_LOCATION (stmt, loc); return add_stmt (stmt); } /* Generate GOMP_cancel call for #pragma omp cancel. */ void c_finish_omp_cancel (location_t loc, tree clauses) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL); int mask = 0; if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL)) mask = 1; else if (omp_find_clause (clauses, OMP_CLAUSE_FOR)) mask = 2; else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS)) mask = 4; else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP)) mask = 8; else { error_at (loc, "%<#pragma omp cancel%> must specify one of " "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> " "clauses"); return; } tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF); if (ifc != NULL_TREE) { tree type = TREE_TYPE (OMP_CLAUSE_IF_EXPR (ifc)); ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR, boolean_type_node, OMP_CLAUSE_IF_EXPR (ifc), build_zero_cst (type)); } else ifc = boolean_true_node; tree stmt = build_call_expr_loc (loc, fn, 2, build_int_cst (integer_type_node, mask), ifc); add_stmt (stmt); } /* Generate GOMP_cancellation_point call for #pragma omp cancellation point. */ void c_finish_omp_cancellation_point (location_t loc, tree clauses) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT); int mask = 0; if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL)) mask = 1; else if (omp_find_clause (clauses, OMP_CLAUSE_FOR)) mask = 2; else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS)) mask = 4; else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP)) mask = 8; else { error_at (loc, "%<#pragma omp cancellation point%> must specify one of " "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> " "clauses"); return; } tree stmt = build_call_expr_loc (loc, fn, 1, build_int_cst (integer_type_node, mask)); add_stmt (stmt); } /* Helper function for handle_omp_array_sections. Called recursively to handle multiple array-section-subscripts. C is the clause, T current expression (initially OMP_CLAUSE_DECL), which is either a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound expression if specified, TREE_VALUE length expression if specified, TREE_CHAIN is what it has been specified after, or some decl. 
TYPES vector is populated with array section types, MAYBE_ZERO_LEN set to true if any of the array-section-subscript could have length of zero (explicit or implicit), FIRST_NON_ONE is the index of the first array-section-subscript which is known not to have length of one. Given say: map(a[:b][2:1][:c][:2][:d][e:f][2:5]) FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c] all are or may have length of 1, array-section-subscript [:2] is the first one known not to have length 1. For array-section-subscript <= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't 0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we can if MAYBE_ZERO_LEN is false. MAYBE_ZERO_LEN will be true in the above case though, as some lengths could be zero. */ static tree handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types, bool &maybe_zero_len, unsigned int &first_non_one, enum c_omp_region_type ort) { tree ret, low_bound, length, type; if (TREE_CODE (t) != TREE_LIST) { if (error_operand_p (t)) return error_mark_node; ret = t; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && TYPE_ATOMIC (strip_array_types (TREE_TYPE (t)))) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (TREE_CODE (t) == COMPONENT_REF && ort == C_ORT_OMP && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM)) { if (DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { error_at (OMP_CLAUSE_LOCATION (c), "bit-field %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } while (TREE_CODE (t) == COMPONENT_REF) { if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is a member of a union", t); return error_mark_node; } t = TREE_OPERAND (t, 0); } } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && TYPE_ATOMIC (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qD in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && VAR_P (t) && DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND && TYPE_ATOMIC (TREE_TYPE (t)) && POINTER_TYPE_P (TREE_TYPE (t))) { /* If the array section is pointer based and the pointer itself is _Atomic qualified, we need to atomically load the pointer. 
*/ c_expr expr; memset (&expr, 0, sizeof (expr)); expr.value = ret; expr = convert_lvalue_to_rvalue (OMP_CLAUSE_LOCATION (c), expr, false, false); ret = expr.value; } return ret; } ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types, maybe_zero_len, first_non_one, ort); if (ret == error_mark_node || ret == NULL_TREE) return ret; type = TREE_TYPE (ret); low_bound = TREE_PURPOSE (t); length = TREE_VALUE (t); if (low_bound == error_mark_node || length == error_mark_node) return error_mark_node; if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound))) { error_at (OMP_CLAUSE_LOCATION (c), "low bound %qE of array section does not have integral type", low_bound); return error_mark_node; } if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length))) { error_at (OMP_CLAUSE_LOCATION (c), "length %qE of array section does not have integral type", length); return error_mark_node; } if (low_bound && TREE_CODE (low_bound) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (low_bound)) > TYPE_PRECISION (sizetype)) low_bound = fold_convert (sizetype, low_bound); if (length && TREE_CODE (length) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (length)) > TYPE_PRECISION (sizetype)) length = fold_convert (sizetype, length); if (low_bound == NULL_TREE) low_bound = integer_zero_node; if (length != NULL_TREE) { if (!integer_nonzerop (length)) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { if (integer_zerop (length)) { error_at (OMP_CLAUSE_LOCATION (c), "zero length array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } } else maybe_zero_len = true; } if (first_non_one == types.length () && (TREE_CODE (length) != INTEGER_CST || integer_onep (length))) first_non_one++; } if (TREE_CODE (type) == ARRAY_TYPE) { if (length == NULL_TREE && (TYPE_DOMAIN (type) == NULL_TREE || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)) { error_at (OMP_CLAUSE_LOCATION (c), "for unknown bound array type length expression must " "be specified"); return error_mark_node; } if (TREE_CODE (low_bound) == INTEGER_CST && tree_int_cst_sgn (low_bound) == -1) { error_at (OMP_CLAUSE_LOCATION (c), "negative low bound in array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (length != NULL_TREE && TREE_CODE (length) == INTEGER_CST && tree_int_cst_sgn (length) == -1) { error_at (OMP_CLAUSE_LOCATION (c), "negative length in array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (TYPE_DOMAIN (type) && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST) { tree size = fold_convert (sizetype, TYPE_MAX_VALUE (TYPE_DOMAIN (type))); size = size_binop (PLUS_EXPR, size, size_one_node); if (TREE_CODE (low_bound) == INTEGER_CST) { if (tree_int_cst_lt (size, low_bound)) { error_at (OMP_CLAUSE_LOCATION (c), "low bound %qE above array section size " "in %qs clause", low_bound, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (tree_int_cst_equal (size, low_bound)) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { error_at (OMP_CLAUSE_LOCATION (c), "zero length array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } maybe_zero_len = true; } else if (length == NULL_TREE && first_non_one == types.length () && tree_int_cst_equal (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), low_bound)) first_non_one++; } else if (length == 
NULL_TREE) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION) maybe_zero_len = true; if (first_non_one == types.length ()) first_non_one++; } if (length && TREE_CODE (length) == INTEGER_CST) { if (tree_int_cst_lt (size, length)) { error_at (OMP_CLAUSE_LOCATION (c), "length %qE above array section size " "in %qs clause", length, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (TREE_CODE (low_bound) == INTEGER_CST) { tree lbpluslen = size_binop (PLUS_EXPR, fold_convert (sizetype, low_bound), fold_convert (sizetype, length)); if (TREE_CODE (lbpluslen) == INTEGER_CST && tree_int_cst_lt (size, lbpluslen)) { error_at (OMP_CLAUSE_LOCATION (c), "high bound %qE above array section size " "in %qs clause", lbpluslen, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } } } } else if (length == NULL_TREE) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION) maybe_zero_len = true; if (first_non_one == types.length ()) first_non_one++; } /* For [lb:] we will need to evaluate lb more than once. */ if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND) { tree lb = save_expr (low_bound); if (lb != low_bound) { TREE_PURPOSE (t) = lb; low_bound = lb; } } } else if (TREE_CODE (type) == POINTER_TYPE) { if (length == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "for pointer type length expression must be specified"); return error_mark_node; } if (length != NULL_TREE && TREE_CODE (length) == INTEGER_CST && tree_int_cst_sgn (length) == -1) { error_at (OMP_CLAUSE_LOCATION (c), "negative length in array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } /* If there is a pointer type anywhere but in the very first array-section-subscript, the array section can't be contiguous. */ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST) { error_at (OMP_CLAUSE_LOCATION (c), "array section is not contiguous in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } } else { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have pointer or array type", ret); return error_mark_node; } if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND) types.safe_push (TREE_TYPE (ret)); /* We will need to evaluate lb more than once. */ tree lb = save_expr (low_bound); if (lb != low_bound) { TREE_PURPOSE (t) = lb; low_bound = lb; } ret = build_array_ref (OMP_CLAUSE_LOCATION (c), ret, low_bound); return ret; } /* Handle array sections for clause C. */ static bool handle_omp_array_sections (tree c, enum c_omp_region_type ort) { bool maybe_zero_len = false; unsigned int first_non_one = 0; auto_vec<tree, 10> types; tree first = handle_omp_array_sections_1 (c, OMP_CLAUSE_DECL (c), types, maybe_zero_len, first_non_one, ort); if (first == error_mark_node) return true; if (first == NULL_TREE) return false; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND) { tree t = OMP_CLAUSE_DECL (c); tree tem = NULL_TREE; /* Need to evaluate side effects in the length expressions if any. 
*/ while (TREE_CODE (t) == TREE_LIST) { if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t))) { if (tem == NULL_TREE) tem = TREE_VALUE (t); else tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem), TREE_VALUE (t), tem); } t = TREE_CHAIN (t); } if (tem) first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first); first = c_fully_fold (first, false, NULL, true); OMP_CLAUSE_DECL (c) = first; } else { unsigned int num = types.length (), i; tree t, side_effects = NULL_TREE, size = NULL_TREE; tree condition = NULL_TREE; if (int_size_in_bytes (TREE_TYPE (first)) <= 0) maybe_zero_len = true; for (i = num, t = OMP_CLAUSE_DECL (c); i > 0; t = TREE_CHAIN (t)) { tree low_bound = TREE_PURPOSE (t); tree length = TREE_VALUE (t); i--; if (low_bound && TREE_CODE (low_bound) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (low_bound)) > TYPE_PRECISION (sizetype)) low_bound = fold_convert (sizetype, low_bound); if (length && TREE_CODE (length) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (length)) > TYPE_PRECISION (sizetype)) length = fold_convert (sizetype, length); if (low_bound == NULL_TREE) low_bound = integer_zero_node; if (!maybe_zero_len && i > first_non_one) { if (integer_nonzerop (low_bound)) goto do_warn_noncontiguous; if (length != NULL_TREE && TREE_CODE (length) == INTEGER_CST && TYPE_DOMAIN (types[i]) && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])) && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))) == INTEGER_CST) { tree size; size = size_binop (PLUS_EXPR, TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])), size_one_node); if (!tree_int_cst_equal (length, size)) { do_warn_noncontiguous: error_at (OMP_CLAUSE_LOCATION (c), "array section is not contiguous in %qs " "clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return true; } } if (length != NULL_TREE && TREE_SIDE_EFFECTS (length)) { if (side_effects == NULL_TREE) side_effects = length; else side_effects = build2 (COMPOUND_EXPR, TREE_TYPE (side_effects), length, side_effects); } } else { tree l; if (i > first_non_one && ((length && integer_nonzerop (length)) || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)) continue; if (length) l = fold_convert (sizetype, length); else { l = size_binop (PLUS_EXPR, TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])), size_one_node); l = size_binop (MINUS_EXPR, l, fold_convert (sizetype, low_bound)); } if (i > first_non_one) { l = fold_build2 (NE_EXPR, boolean_type_node, l, size_zero_node); if (condition == NULL_TREE) condition = l; else condition = fold_build2 (BIT_AND_EXPR, boolean_type_node, l, condition); } else if (size == NULL_TREE) { size = size_in_bytes (TREE_TYPE (types[i])); tree eltype = TREE_TYPE (types[num - 1]); while (TREE_CODE (eltype) == ARRAY_TYPE) eltype = TREE_TYPE (eltype); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { if (integer_zerop (size) || integer_zerop (size_in_bytes (eltype))) { error_at (OMP_CLAUSE_LOCATION (c), "zero length array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } size = size_binop (EXACT_DIV_EXPR, size, size_in_bytes (eltype)); } size = size_binop (MULT_EXPR, size, l); if (condition) size = fold_build3 (COND_EXPR, sizetype, condition, size, size_zero_node); } else size = size_binop (MULT_EXPR, size, l); } } if (side_effects) size = build2 (COMPOUND_EXPR, sizetype, side_effects, size); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { size = size_binop (MINUS_EXPR, size, size_one_node); size = c_fully_fold (size, false, NULL); tree index_type = build_index_type (size); tree eltype = TREE_TYPE (first); while (TREE_CODE (eltype) == ARRAY_TYPE) 
eltype = TREE_TYPE (eltype); tree type = build_array_type (eltype, index_type); tree ptype = build_pointer_type (eltype); if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) t = build_fold_addr_expr (t); tree t2 = build_fold_addr_expr (first); t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t2); t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, ptrdiff_type_node, t2, fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t)); t2 = c_fully_fold (t2, false, NULL); if (tree_fits_shwi_p (t2)) t = build2 (MEM_REF, type, t, build_int_cst (ptype, tree_to_shwi (t2))); else { t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c), sizetype, t2); t = build2_loc (OMP_CLAUSE_LOCATION (c), POINTER_PLUS_EXPR, TREE_TYPE (t), t, t2); t = build2 (MEM_REF, type, t, build_int_cst (ptype, 0)); } OMP_CLAUSE_DECL (c) = t; return false; } first = c_fully_fold (first, false, NULL); OMP_CLAUSE_DECL (c) = first; if (size) size = c_fully_fold (size, false, NULL); OMP_CLAUSE_SIZE (c) = size; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP || (TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)) return false; gcc_assert (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FORCE_DEVICEPTR); if (ort == C_ORT_OMP || ort == C_ORT_ACC) switch (OMP_CLAUSE_MAP_KIND (c)) { case GOMP_MAP_ALLOC: case GOMP_MAP_TO: case GOMP_MAP_FROM: case GOMP_MAP_TOFROM: case GOMP_MAP_ALWAYS_TO: case GOMP_MAP_ALWAYS_FROM: case GOMP_MAP_ALWAYS_TOFROM: case GOMP_MAP_RELEASE: case GOMP_MAP_DELETE: case GOMP_MAP_FORCE_TO: case GOMP_MAP_FORCE_FROM: case GOMP_MAP_FORCE_TOFROM: case GOMP_MAP_FORCE_PRESENT: OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c) = 1; break; default: break; } tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); if (ort != C_ORT_OMP && ort != C_ORT_ACC) OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER); else if (TREE_CODE (t) == COMPONENT_REF) OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALWAYS_POINTER); else OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_POINTER); if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER && !c_mark_addressable (t)) return false; OMP_CLAUSE_DECL (c2) = t; t = build_fold_addr_expr (first); t = fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t); tree ptr = OMP_CLAUSE_DECL (c2); if (!POINTER_TYPE_P (TREE_TYPE (ptr))) ptr = build_fold_addr_expr (ptr); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, ptrdiff_type_node, t, fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, ptr)); t = c_fully_fold (t, false, NULL); OMP_CLAUSE_SIZE (c2) = t; OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = c2; } return false; } /* Helper function of finish_omp_clauses. Clone STMT as if we were making an inline call. But, remap the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv) to DECL. 
*/ static tree c_clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2, tree decl, tree placeholder) { copy_body_data id; hash_map<tree, tree> decl_map; decl_map.put (omp_decl1, placeholder); decl_map.put (omp_decl2, decl); memset (&id, 0, sizeof (id)); id.src_fn = DECL_CONTEXT (omp_decl1); id.dst_fn = current_function_decl; id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn); id.decl_map = &decl_map; id.copy_decl = copy_decl_no_change; id.transform_call_graph_edges = CB_CGE_DUPLICATE; id.transform_new_cfg = true; id.transform_return_to_modify = false; id.transform_lang_insert_block = NULL; id.eh_lp_nr = 0; walk_tree (&stmt, copy_tree_body_r, &id, NULL); return stmt; } /* Helper function of c_finish_omp_clauses, called via walk_tree. Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP. */ static tree c_find_omp_placeholder_r (tree *tp, int *, void *data) { if (*tp == (tree) data) return *tp; return NULL_TREE; } /* For all elements of CLAUSES, validate them against their constraints. Remove any elements from the list that are invalid. */ tree c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort) { bitmap_head generic_head, firstprivate_head, lastprivate_head; bitmap_head aligned_head, map_head, map_field_head, oacc_reduction_head; tree c, t, type, *pc; tree simdlen = NULL_TREE, safelen = NULL_TREE; bool branch_seen = false; bool copyprivate_seen = false; bool linear_variable_step_check = false; tree *nowait_clause = NULL; bool ordered_seen = false; tree schedule_clause = NULL_TREE; bool oacc_async = false; bitmap_obstack_initialize (NULL); bitmap_initialize (&generic_head, &bitmap_default_obstack); bitmap_initialize (&firstprivate_head, &bitmap_default_obstack); bitmap_initialize (&lastprivate_head, &bitmap_default_obstack); bitmap_initialize (&aligned_head, &bitmap_default_obstack); bitmap_initialize (&map_head, &bitmap_default_obstack); bitmap_initialize (&map_field_head, &bitmap_default_obstack); bitmap_initialize (&oacc_reduction_head, &bitmap_default_obstack); if (ort & C_ORT_ACC) for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ASYNC) { oacc_async = true; break; } for (pc = &clauses, c = clauses; c ; c = *pc) { bool remove = false; bool need_complete = false; bool need_implicitly_determined = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: need_implicitly_determined = true; goto check_dup_generic; case OMP_CLAUSE_PRIVATE: need_complete = true; need_implicitly_determined = true; goto check_dup_generic; case OMP_CLAUSE_REDUCTION: need_implicitly_determined = true; t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) { remove = true; break; } t = OMP_CLAUSE_DECL (c); } t = require_complete_type (OMP_CLAUSE_LOCATION (c), t); if (t == error_mark_node) { remove = true; break; } if (oacc_async) c_mark_addressable (t); type = TREE_TYPE (t); if (TREE_CODE (t) == MEM_REF) type = TREE_TYPE (type); if (TREE_CODE (type) == ARRAY_TYPE) { tree oatype = type; gcc_assert (TREE_CODE (t) != MEM_REF); while (TREE_CODE (type) == ARRAY_TYPE) type = TREE_TYPE (type); if (integer_zerop (TYPE_SIZE_UNIT (type))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD in %<reduction%> clause is a zero size array", t); remove = true; break; } tree size = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (oatype), TYPE_SIZE_UNIT (type)); if (integer_zerop (size)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD in %<reduction%> clause is a zero size array", t); remove = true; break; } size = size_binop (MINUS_EXPR, size, size_one_node); 
tree index_type = build_index_type (size); tree atype = build_array_type (type, index_type); tree ptype = build_pointer_type (type); if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) t = build_fold_addr_expr (t); t = build2 (MEM_REF, atype, t, build_int_cst (ptype, 0)); OMP_CLAUSE_DECL (c) = t; } if (TYPE_ATOMIC (type)) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qE in %<reduction%> clause", t); remove = true; break; } if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE && (FLOAT_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)) { enum tree_code r_code = OMP_CLAUSE_REDUCTION_CODE (c); const char *r_name = NULL; switch (r_code) { case PLUS_EXPR: case MULT_EXPR: case MINUS_EXPR: break; case MIN_EXPR: if (TREE_CODE (type) == COMPLEX_TYPE) r_name = "min"; break; case MAX_EXPR: if (TREE_CODE (type) == COMPLEX_TYPE) r_name = "max"; break; case BIT_AND_EXPR: r_name = "&"; break; case BIT_XOR_EXPR: r_name = "^"; break; case BIT_IOR_EXPR: r_name = "|"; break; case TRUTH_ANDIF_EXPR: if (FLOAT_TYPE_P (type)) r_name = "&&"; break; case TRUTH_ORIF_EXPR: if (FLOAT_TYPE_P (type)) r_name = "||"; break; default: gcc_unreachable (); } if (r_name) { error_at (OMP_CLAUSE_LOCATION (c), "%qE has invalid type for %<reduction(%s)%>", t, r_name); remove = true; break; } } else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == error_mark_node) { error_at (OMP_CLAUSE_LOCATION (c), "user defined reduction not found for %qE", t); remove = true; break; } else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { tree list = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c); type = TYPE_MAIN_VARIANT (type); tree placeholder = build_decl (OMP_CLAUSE_LOCATION (c), VAR_DECL, NULL_TREE, type); tree decl_placeholder = NULL_TREE; OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder; DECL_ARTIFICIAL (placeholder) = 1; DECL_IGNORED_P (placeholder) = 1; if (TREE_CODE (t) == MEM_REF) { decl_placeholder = build_decl (OMP_CLAUSE_LOCATION (c), VAR_DECL, NULL_TREE, type); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = decl_placeholder; DECL_ARTIFICIAL (decl_placeholder) = 1; DECL_IGNORED_P (decl_placeholder) = 1; } if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 0))) c_mark_addressable (placeholder); if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 1))) c_mark_addressable (decl_placeholder ? decl_placeholder : OMP_CLAUSE_DECL (c)); OMP_CLAUSE_REDUCTION_MERGE (c) = c_clone_omp_udr (TREE_VEC_ELT (list, 2), TREE_VEC_ELT (list, 0), TREE_VEC_ELT (list, 1), decl_placeholder ? decl_placeholder : OMP_CLAUSE_DECL (c), placeholder); OMP_CLAUSE_REDUCTION_MERGE (c) = build3_loc (OMP_CLAUSE_LOCATION (c), BIND_EXPR, void_type_node, NULL_TREE, OMP_CLAUSE_REDUCTION_MERGE (c), NULL_TREE); TREE_SIDE_EFFECTS (OMP_CLAUSE_REDUCTION_MERGE (c)) = 1; if (TREE_VEC_LENGTH (list) == 6) { if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 3))) c_mark_addressable (decl_placeholder ? decl_placeholder : OMP_CLAUSE_DECL (c)); if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 4))) c_mark_addressable (placeholder); tree init = TREE_VEC_ELT (list, 5); if (init == error_mark_node) init = DECL_INITIAL (TREE_VEC_ELT (list, 3)); OMP_CLAUSE_REDUCTION_INIT (c) = c_clone_omp_udr (init, TREE_VEC_ELT (list, 4), TREE_VEC_ELT (list, 3), decl_placeholder ? decl_placeholder : OMP_CLAUSE_DECL (c), placeholder); if (TREE_VEC_ELT (list, 5) == error_mark_node) { tree v = decl_placeholder ? 
decl_placeholder : t; OMP_CLAUSE_REDUCTION_INIT (c) = build2 (INIT_EXPR, TREE_TYPE (v), v, OMP_CLAUSE_REDUCTION_INIT (c)); } if (walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c), c_find_omp_placeholder_r, placeholder, NULL)) OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1; } else { tree init; tree v = decl_placeholder ? decl_placeholder : t; if (AGGREGATE_TYPE_P (TREE_TYPE (v))) init = build_constructor (TREE_TYPE (v), NULL); else init = fold_convert (TREE_TYPE (v), integer_zero_node); OMP_CLAUSE_REDUCTION_INIT (c) = build2 (INIT_EXPR, TREE_TYPE (v), v, init); } OMP_CLAUSE_REDUCTION_INIT (c) = build3_loc (OMP_CLAUSE_LOCATION (c), BIND_EXPR, void_type_node, NULL_TREE, OMP_CLAUSE_REDUCTION_INIT (c), NULL_TREE); TREE_SIDE_EFFECTS (OMP_CLAUSE_REDUCTION_INIT (c)) = 1; } if (TREE_CODE (t) == MEM_REF) { if (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))) == NULL_TREE || TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t)))) != INTEGER_CST) { sorry ("variable length element type in array " "%<reduction%> clause"); remove = true; break; } t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == POINTER_PLUS_EXPR) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == ADDR_EXPR) t = TREE_OPERAND (t, 0); } goto check_dup_generic_t; case OMP_CLAUSE_COPYPRIVATE: copyprivate_seen = true; if (nowait_clause) { error_at (OMP_CLAUSE_LOCATION (*nowait_clause), "%<nowait%> clause must not be used together " "with %<copyprivate%>"); *nowait_clause = OMP_CLAUSE_CHAIN (*nowait_clause); nowait_clause = NULL; } goto check_dup_generic; case OMP_CLAUSE_COPYIN: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) || !DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qE must be %<threadprivate%> for %<copyin%>", t); remove = true; break; } goto check_dup_generic; case OMP_CLAUSE_LINEAR: if (ort != C_ORT_OMP_DECLARE_SIMD) need_implicitly_determined = true; t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_OMP_DECLARE_SIMD && OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_DEFAULT) { error_at (OMP_CLAUSE_LOCATION (c), "modifier should not be specified in %<linear%> " "clause on %<simd%> or %<for%> constructs"); OMP_CLAUSE_LINEAR_KIND (c) = OMP_CLAUSE_LINEAR_DEFAULT; } if (!INTEGRAL_TYPE_P (TREE_TYPE (t)) && TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE) { error_at (OMP_CLAUSE_LOCATION (c), "linear clause applied to non-integral non-pointer " "variable with type %qT", TREE_TYPE (t)); remove = true; break; } if (TYPE_ATOMIC (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qD in %<linear%> clause", t); remove = true; break; } if (ort == C_ORT_OMP_DECLARE_SIMD) { tree s = OMP_CLAUSE_LINEAR_STEP (c); if (TREE_CODE (s) == PARM_DECL) { OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) = 1; /* map_head bitmap is used as uniform_head if declare_simd. 
*/ if (!bitmap_bit_p (&map_head, DECL_UID (s))) linear_variable_step_check = true; goto check_dup_generic; } if (TREE_CODE (s) != INTEGER_CST) { error_at (OMP_CLAUSE_LOCATION (c), "%<linear%> clause step %qE is neither constant " "nor a parameter", s); remove = true; break; } } if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == POINTER_TYPE) { tree s = OMP_CLAUSE_LINEAR_STEP (c); s = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, OMP_CLAUSE_DECL (c), s); s = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, fold_convert (sizetype, s), fold_convert (sizetype, OMP_CLAUSE_DECL (c))); if (s == error_mark_node) s = size_one_node; OMP_CLAUSE_LINEAR_STEP (c) = s; } else OMP_CLAUSE_LINEAR_STEP (c) = fold_convert (TREE_TYPE (t), OMP_CLAUSE_LINEAR_STEP (c)); goto check_dup_generic; check_dup_generic: t = OMP_CLAUSE_DECL (c); check_dup_generic_t: if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (ort == C_ORT_ACC && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t))) { error ("%qD appears more than once in reduction clauses", t); remove = true; } else bitmap_set_bit (&oacc_reduction_head, DECL_UID (t)); } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once in data clauses", t); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE && bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_FIRSTPRIVATE: t = OMP_CLAUSE_DECL (c); need_complete = true; need_implicitly_determined = true; if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %<firstprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once in data clauses", t); remove = true; } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&firstprivate_head, DECL_UID (t)); break; case OMP_CLAUSE_LASTPRIVATE: t = OMP_CLAUSE_DECL (c); need_complete = true; need_implicitly_determined = true; if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %<lastprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&lastprivate_head, DECL_UID (t)); break; case OMP_CLAUSE_ALIGNED: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %<aligned%> clause", t); remove = true; } else if (!POINTER_TYPE_P (TREE_TYPE (t)) && TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE) { error_at (OMP_CLAUSE_LOCATION (c), "%qE in %<aligned%> clause is neither a pointer nor " 
"an array", t); remove = true; } else if (TYPE_ATOMIC (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qD in %<aligned%> clause", t); remove = true; break; } else if (bitmap_bit_p (&aligned_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once in %<aligned%> clauses", t); remove = true; } else bitmap_set_bit (&aligned_head, DECL_UID (t)); break; case OMP_CLAUSE_DEPEND: t = OMP_CLAUSE_DECL (c); if (t == NULL_TREE) { gcc_assert (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE); break; } if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK) { gcc_assert (TREE_CODE (t) == TREE_LIST); for (; t; t = TREE_CHAIN (t)) { tree decl = TREE_VALUE (t); if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE) { tree offset = TREE_PURPOSE (t); bool neg = wi::neg_p (wi::to_wide (offset)); offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset); tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (c), neg ? MINUS_EXPR : PLUS_EXPR, decl, offset); t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, fold_convert (sizetype, t2), fold_convert (sizetype, decl)); if (t2 == error_mark_node) { remove = true; break; } TREE_PURPOSE (t) = t2; } } break; } if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) remove = true; break; } if (t == error_mark_node) remove = true; else if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %<depend%> clause", t); remove = true; } else if (!c_mark_addressable (t)) remove = true; break; case OMP_CLAUSE_MAP: case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case OMP_CLAUSE__CACHE_: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) remove = true; else { t = OMP_CLAUSE_DECL (c); if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "array section does not have mappable type " "in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (TYPE_ATOMIC (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } while (TREE_CODE (t) == ARRAY_REF) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { while (TREE_CODE (t) == COMPONENT_REF) t = TREE_OPERAND (t, 0); if (bitmap_bit_p (&map_field_head, DECL_UID (t))) break; if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error ("%qD appears more than once in motion" " clauses", t); else if (ort == C_ORT_ACC) error ("%qD appears more than once in data" " clauses", t); else error ("%qD appears more than once in map" " clauses", t); remove = true; } else { bitmap_set_bit (&map_head, DECL_UID (t)); bitmap_set_bit (&map_field_head, DECL_UID (t)); } } } break; } if (t == error_mark_node) { remove = true; break; } if (TREE_CODE (t) == COMPONENT_REF && (ort & C_ORT_OMP) && OMP_CLAUSE_CODE (c) != OMP_CLAUSE__CACHE_) { if (DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { error_at (OMP_CLAUSE_LOCATION (c), "bit-field %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (TYPE_ATOMIC (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qE in %qs clause", t, 
omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } while (TREE_CODE (t) == COMPONENT_REF) { if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is a member of a union", t); remove = true; break; } t = TREE_OPERAND (t, 0); } if (remove) break; if (VAR_P (t) || TREE_CODE (t) == PARM_DECL) { if (bitmap_bit_p (&map_field_head, DECL_UID (t))) break; } } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (VAR_P (t) && DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP || (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER)) && !c_mark_addressable (t)) remove = true; else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER || (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER) || (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FORCE_DEVICEPTR))) && t == OMP_CLAUSE_DECL (c) && !lang_hooks.types.omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (TREE_TYPE (t) == error_mark_node) remove = true; else if (TYPE_ATOMIC (strip_array_types (TREE_TYPE (t)))) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER) { if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error ("%qD appears more than once in motion clauses", t); else if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears more than once in map clauses", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears both in data and map clauses", t); remove = true; } else { bitmap_set_bit (&map_head, DECL_UID (t)); if (t != OMP_CLAUSE_DECL (c) && TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF) bitmap_set_bit (&map_field_head, DECL_UID (t)); } break; case OMP_CLAUSE_TO_DECLARE: case OMP_CLAUSE_LINK: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == FUNCTION_DECL && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE) ; else if (!VAR_P (t)) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE) error_at (OMP_CLAUSE_LOCATION (c), "%qE is neither a variable nor a function name in " "clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION 
(c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } if (remove) break; if (bitmap_bit_p (&generic_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once on the same " "%<declare target%> directive", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_UNIFORM: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != PARM_DECL) { if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not an argument in %<uniform%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not an argument in %<uniform%> clause", t); remove = true; break; } /* map_head bitmap is used as uniform_head if declare_simd. */ bitmap_set_bit (&map_head, DECL_UID (t)); goto check_dup_generic; case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_PTR: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE && TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE) { error_at (OMP_CLAUSE_LOCATION (c), "%qs variable is neither a pointer nor an array", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } goto check_dup_generic; case OMP_CLAUSE_NOWAIT: if (copyprivate_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nowait%> clause must not be used together " "with %<copyprivate%>"); remove = true; break; } nowait_clause = pc; pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_DIST_SCHEDULE: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_FOR: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE_PRIORITY: case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_THREADS: case OMP_CLAUSE_SIMD: case OMP_CLAUSE_HINT: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_NUM_GANGS: case OMP_CLAUSE_NUM_WORKERS: case OMP_CLAUSE_VECTOR_LENGTH: case OMP_CLAUSE_ASYNC: case OMP_CLAUSE_WAIT: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_GANG: case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: case OMP_CLAUSE_TILE: pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_SCHEDULE: if (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) { const char *p = NULL; switch (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK) { case OMP_CLAUSE_SCHEDULE_STATIC: p = "static"; break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: break; case OMP_CLAUSE_SCHEDULE_GUIDED: break; case OMP_CLAUSE_SCHEDULE_AUTO: p = "auto"; break; case OMP_CLAUSE_SCHEDULE_RUNTIME: p = "runtime"; break; default: gcc_unreachable (); } if (p) { error_at (OMP_CLAUSE_LOCATION (c), "%<nonmonotonic%> modifier specified for %qs " "schedule kind", p); OMP_CLAUSE_SCHEDULE_KIND (c) = (enum omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (c) & ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC); } } schedule_clause = c; pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_ORDERED: ordered_seen = true; pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_SAFELEN: safelen = c; pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_SIMDLEN: simdlen = c; pc = &OMP_CLAUSE_CHAIN (c); continue; 
case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: if (branch_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<inbranch%> clause is incompatible with " "%<notinbranch%>"); remove = true; break; } branch_seen = true; pc = &OMP_CLAUSE_CHAIN (c); continue; default: gcc_unreachable (); } if (!remove) { t = OMP_CLAUSE_DECL (c); if (need_complete) { t = require_complete_type (OMP_CLAUSE_LOCATION (c), t); if (t == error_mark_node) remove = true; } if (need_implicitly_determined) { const char *share_name = NULL; if (VAR_P (t) && DECL_THREAD_LOCAL_P (t)) share_name = "threadprivate"; else switch (c_omp_predetermined_sharing (t)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: /* const vars may be specified in firstprivate clause. */ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && TREE_READONLY (t)) break; share_name = "shared"; break; case OMP_CLAUSE_DEFAULT_PRIVATE: share_name = "private"; break; default: gcc_unreachable (); } if (share_name) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is predetermined %qs for %qs", t, share_name, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } if (simdlen && safelen && tree_int_cst_lt (OMP_CLAUSE_SAFELEN_EXPR (safelen), OMP_CLAUSE_SIMDLEN_EXPR (simdlen))) { error_at (OMP_CLAUSE_LOCATION (simdlen), "%<simdlen%> clause value is bigger than " "%<safelen%> clause value"); OMP_CLAUSE_SIMDLEN_EXPR (simdlen) = OMP_CLAUSE_SAFELEN_EXPR (safelen); } if (ordered_seen && schedule_clause && (OMP_CLAUSE_SCHEDULE_KIND (schedule_clause) & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)) { error_at (OMP_CLAUSE_LOCATION (schedule_clause), "%<nonmonotonic%> schedule modifier specified together " "with %<ordered%> clause"); OMP_CLAUSE_SCHEDULE_KIND (schedule_clause) = (enum omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (schedule_clause) & ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC); } if (linear_variable_step_check) for (pc = &clauses, c = clauses; c ; c = *pc) { bool remove = false; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) && !bitmap_bit_p (&map_head, DECL_UID (OMP_CLAUSE_LINEAR_STEP (c)))) { error_at (OMP_CLAUSE_LOCATION (c), "%<linear%> clause step is a parameter %qD not " "specified in %<uniform%> clause", OMP_CLAUSE_LINEAR_STEP (c)); remove = true; } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } bitmap_obstack_release (NULL); return clauses; } /* Return code to initialize DST with a copy constructor from SRC. C doesn't have copy constructors nor assignment operators, only for _Atomic vars we need to perform __atomic_load from src into a temporary followed by __atomic_store of the temporary to dst. */ tree c_omp_clause_copy_ctor (tree clause, tree dst, tree src) { if (!really_atomic_lvalue (dst) && !really_atomic_lvalue (src)) return build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src); location_t loc = OMP_CLAUSE_LOCATION (clause); tree type = TREE_TYPE (dst); tree nonatomic_type = build_qualified_type (type, TYPE_UNQUALIFIED); tree tmp = create_tmp_var (nonatomic_type); tree tmp_addr = build_fold_addr_expr (tmp); TREE_ADDRESSABLE (tmp) = 1; TREE_NO_WARNING (tmp) = 1; tree src_addr = build_fold_addr_expr (src); tree dst_addr = build_fold_addr_expr (dst); tree seq_cst = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST); vec<tree, va_gc> *params; /* Expansion of a generic atomic load may require an addition element, so allocate enough to prevent a resize. 
*/ vec_alloc (params, 4); /* Build __atomic_load (&src, &tmp, SEQ_CST); */ tree fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD); params->quick_push (src_addr); params->quick_push (tmp_addr); params->quick_push (seq_cst); tree load = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL); vec_alloc (params, 4); /* Build __atomic_store (&dst, &tmp, SEQ_CST); */ fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_STORE); params->quick_push (dst_addr); params->quick_push (tmp_addr); params->quick_push (seq_cst); tree store = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL); return build2 (COMPOUND_EXPR, void_type_node, load, store); } /* Create a transaction node. */ tree c_finish_transaction (location_t loc, tree block, int flags) { tree stmt = build_stmt (loc, TRANSACTION_EXPR, block); if (flags & TM_STMT_ATTR_OUTER) TRANSACTION_EXPR_OUTER (stmt) = 1; if (flags & TM_STMT_ATTR_RELAXED) TRANSACTION_EXPR_RELAXED (stmt) = 1; return add_stmt (stmt); } /* Make a variant type in the proper way for C/C++, propagating qualifiers down to the element type of an array. If ORIG_QUAL_TYPE is not NULL, then it should be used as the qualified type ORIG_QUAL_INDIRECT levels down in array type derivation (to preserve information about the typedef name from which an array type was derived). */ tree c_build_qualified_type (tree type, int type_quals, tree orig_qual_type, size_t orig_qual_indirect) { if (type == error_mark_node) return type; if (TREE_CODE (type) == ARRAY_TYPE) { tree t; tree element_type = c_build_qualified_type (TREE_TYPE (type), type_quals, orig_qual_type, orig_qual_indirect - 1); /* See if we already have an identically qualified type. */ if (orig_qual_type && orig_qual_indirect == 0) t = orig_qual_type; else for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { if (TYPE_QUALS (strip_array_types (t)) == type_quals && TYPE_NAME (t) == TYPE_NAME (type) && TYPE_CONTEXT (t) == TYPE_CONTEXT (type) && attribute_list_equal (TYPE_ATTRIBUTES (t), TYPE_ATTRIBUTES (type))) break; } if (!t) { tree domain = TYPE_DOMAIN (type); t = build_variant_type_copy (type); TREE_TYPE (t) = element_type; if (TYPE_STRUCTURAL_EQUALITY_P (element_type) || (domain && TYPE_STRUCTURAL_EQUALITY_P (domain))) SET_TYPE_STRUCTURAL_EQUALITY (t); else if (TYPE_CANONICAL (element_type) != element_type || (domain && TYPE_CANONICAL (domain) != domain)) { tree unqualified_canon = build_array_type (TYPE_CANONICAL (element_type), domain? TYPE_CANONICAL (domain) : NULL_TREE); if (TYPE_REVERSE_STORAGE_ORDER (type)) { unqualified_canon = build_distinct_type_copy (unqualified_canon); TYPE_REVERSE_STORAGE_ORDER (unqualified_canon) = 1; } TYPE_CANONICAL (t) = c_build_qualified_type (unqualified_canon, type_quals); } else TYPE_CANONICAL (t) = t; } return t; } /* A restrict-qualified pointer type must be a pointer to object or incomplete type. Note that the use of POINTER_TYPE_P also allows REFERENCE_TYPEs, which is appropriate for C++. */ if ((type_quals & TYPE_QUAL_RESTRICT) && (!POINTER_TYPE_P (type) || !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type)))) { error ("invalid use of %<restrict%>"); type_quals &= ~TYPE_QUAL_RESTRICT; } tree var_type = (orig_qual_type && orig_qual_indirect == 0 ? orig_qual_type : build_qualified_type (type, type_quals)); /* A variant type does not inherit the list of incomplete vars from the type main variant. 
*/ if (RECORD_OR_UNION_TYPE_P (var_type) && TYPE_MAIN_VARIANT (var_type) != var_type) C_TYPE_INCOMPLETE_VARS (var_type) = 0; return var_type; } /* Build a VA_ARG_EXPR for the C parser. */ tree c_build_va_arg (location_t loc1, tree expr, location_t loc2, tree type) { if (error_operand_p (type)) return error_mark_node; /* VA_ARG_EXPR cannot be used for a scalar va_list with reverse storage order because it takes the address of the expression. */ else if (handled_component_p (expr) && reverse_storage_order_for_component_p (expr)) { error_at (loc1, "cannot use %<va_arg%> with reverse storage order"); return error_mark_node; } else if (!COMPLETE_TYPE_P (type)) { error_at (loc2, "second argument to %<va_arg%> is of incomplete " "type %qT", type); return error_mark_node; } else if (warn_cxx_compat && TREE_CODE (type) == ENUMERAL_TYPE) warning_at (loc2, OPT_Wc___compat, "C++ requires promoted type, not enum type, in %<va_arg%>"); return build_va_arg (loc2, expr, type); } /* Return truthvalue of whether T1 is the same tree structure as T2. Return 1 if they are the same. Return false if they are different. */ bool c_tree_equal (tree t1, tree t2) { enum tree_code code1, code2; if (t1 == t2) return true; if (!t1 || !t2) return false; for (code1 = TREE_CODE (t1); CONVERT_EXPR_CODE_P (code1) || code1 == NON_LVALUE_EXPR; code1 = TREE_CODE (t1)) t1 = TREE_OPERAND (t1, 0); for (code2 = TREE_CODE (t2); CONVERT_EXPR_CODE_P (code2) || code2 == NON_LVALUE_EXPR; code2 = TREE_CODE (t2)) t2 = TREE_OPERAND (t2, 0); /* They might have become equal now. */ if (t1 == t2) return true; if (code1 != code2) return false; switch (code1) { case INTEGER_CST: return wi::to_wide (t1) == wi::to_wide (t2); case REAL_CST: return real_equal (&TREE_REAL_CST (t1), &TREE_REAL_CST (t2)); case STRING_CST: return TREE_STRING_LENGTH (t1) == TREE_STRING_LENGTH (t2) && !memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2), TREE_STRING_LENGTH (t1)); case FIXED_CST: return FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (t1), TREE_FIXED_CST (t2)); case COMPLEX_CST: return c_tree_equal (TREE_REALPART (t1), TREE_REALPART (t2)) && c_tree_equal (TREE_IMAGPART (t1), TREE_IMAGPART (t2)); case VECTOR_CST: return operand_equal_p (t1, t2, OEP_ONLY_CONST); case CONSTRUCTOR: /* We need to do this when determining whether or not two non-type pointer to member function template arguments are the same. 
*/ if (!comptypes (TREE_TYPE (t1), TREE_TYPE (t2)) || CONSTRUCTOR_NELTS (t1) != CONSTRUCTOR_NELTS (t2)) return false; { tree field, value; unsigned int i; FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (t1), i, field, value) { constructor_elt *elt2 = CONSTRUCTOR_ELT (t2, i); if (!c_tree_equal (field, elt2->index) || !c_tree_equal (value, elt2->value)) return false; } } return true; case TREE_LIST: if (!c_tree_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2))) return false; if (!c_tree_equal (TREE_VALUE (t1), TREE_VALUE (t2))) return false; return c_tree_equal (TREE_CHAIN (t1), TREE_CHAIN (t2)); case SAVE_EXPR: return c_tree_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); case CALL_EXPR: { tree arg1, arg2; call_expr_arg_iterator iter1, iter2; if (!c_tree_equal (CALL_EXPR_FN (t1), CALL_EXPR_FN (t2))) return false; for (arg1 = first_call_expr_arg (t1, &iter1), arg2 = first_call_expr_arg (t2, &iter2); arg1 && arg2; arg1 = next_call_expr_arg (&iter1), arg2 = next_call_expr_arg (&iter2)) if (!c_tree_equal (arg1, arg2)) return false; if (arg1 || arg2) return false; return true; } case TARGET_EXPR: { tree o1 = TREE_OPERAND (t1, 0); tree o2 = TREE_OPERAND (t2, 0); /* Special case: if either target is an unallocated VAR_DECL, it means that it's going to be unified with whatever the TARGET_EXPR is really supposed to initialize, so treat it as being equivalent to anything. */ if (VAR_P (o1) && DECL_NAME (o1) == NULL_TREE && !DECL_RTL_SET_P (o1)) /*Nop*/; else if (VAR_P (o2) && DECL_NAME (o2) == NULL_TREE && !DECL_RTL_SET_P (o2)) /*Nop*/; else if (!c_tree_equal (o1, o2)) return false; return c_tree_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1)); } case COMPONENT_REF: if (TREE_OPERAND (t1, 1) != TREE_OPERAND (t2, 1)) return false; return c_tree_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); case PARM_DECL: case VAR_DECL: case CONST_DECL: case FIELD_DECL: case FUNCTION_DECL: case IDENTIFIER_NODE: case SSA_NAME: return false; case TREE_VEC: { unsigned ix; if (TREE_VEC_LENGTH (t1) != TREE_VEC_LENGTH (t2)) return false; for (ix = TREE_VEC_LENGTH (t1); ix--;) if (!c_tree_equal (TREE_VEC_ELT (t1, ix), TREE_VEC_ELT (t2, ix))) return false; return true; } default: break; } switch (TREE_CODE_CLASS (code1)) { case tcc_unary: case tcc_binary: case tcc_comparison: case tcc_expression: case tcc_vl_exp: case tcc_reference: case tcc_statement: { int i, n = TREE_OPERAND_LENGTH (t1); switch (code1) { case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: n = 1; break; case ARRAY_REF: n = 2; break; default: break; } if (TREE_CODE_CLASS (code1) == tcc_vl_exp && n != TREE_OPERAND_LENGTH (t2)) return false; for (i = 0; i < n; ++i) if (!c_tree_equal (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i))) return false; return true; } case tcc_type: return comptypes (t1, t2); default: gcc_unreachable (); } /* We can get here with --disable-checking. */ return false; } /* Returns true when the function declaration FNDECL is implicit, introduced as a result of a call to an otherwise undeclared function, and false otherwise. */ bool c_decl_implicit (const_tree fndecl) { return C_DECL_IMPLICIT (fndecl); }
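For _Atomic operands, c_omp_clause_copy_ctor above builds an atomic load of the source into a plain temporary followed by an atomic store into the destination, both sequentially consistent. A minimal sketch of the same pattern at the source level, using GCC's generic __atomic_load/__atomic_store builtins; the function name and the int element type are illustrative, not from the original:

/* Source-level equivalent of the generated copy: load SRC atomically
   into a non-atomic temporary, then store the temporary atomically
   into DST, both with SEQ_CST ordering.  */
void
copy_atomic_int (_Atomic int *dst, _Atomic int *src)
{
  int tmp;                                      /* non-atomic temporary */
  __atomic_load (src, &tmp, __ATOMIC_SEQ_CST);  /* tmp = load (*src) */
  __atomic_store (dst, &tmp, __ATOMIC_SEQ_CST); /* store (*dst, tmp) */
}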
pr60823-2.c
/* PR tree-optimization/60823 */ /* { dg-do run } */ /* { dg-require-effective-target vect_simd_clones } */ /* { dg-options "-O2 -fopenmp-simd" } */ #pragma omp declare simd simdlen(4) notinbranch __attribute__((noinline)) int foo (double c1, double c2) { double z1 = c1, z2 = c2; int res = 100, i; for (i = 0; i < 5; i++) { res = (z1 * z1 + z2 * z2 > 4.0) ? (i < res ? i : res) : res; z1 = c1 + z1 * z1 - z2 * z2; z2 = c2 + 2.0 * z1 * z2; c1 += 0.5; c2 += 0.5; } return res; } __attribute__((noinline, noclone)) void bar (double *x, double *y) { asm volatile ("" : : "rm" (x), "rm" (y) : "memory"); } int main () { int i; double c[4] = { 0.0, 1.0, 0.0, 1.0 }; double d[4] = { 0.0, 1.0, 2.0, 0.0 }; int e[4]; bar (c, d); #pragma omp simd safelen(4) for (i = 0; i < 4; i++) e[i] = foo (c[i], d[i]); if (e[0] != 3 || e[1] != 1 || e[2] != 1 || e[3] != 2) __builtin_abort (); return 0; }
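This test exercises declare simd clones; the clause checking in c_finish_omp_clauses above additionally enforces that a simdlen clause never exceeds a safelen clause. A small hypothetical example (not part of the testsuite) that would trigger that diagnostic and have its simdlen clamped:

void
bump (int *a, int n)
{
  int i;
  /* Invalid: 8 > 4, so the C front end reports "'simdlen' clause value
     is bigger than 'safelen' clause value" and clamps simdlen to 4.  */
#pragma omp simd safelen(4) simdlen(8)
  for (i = 0; i < n; i++)
    a[i] += 1;
}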
loop_fission_par.c
#include <omp.h>

void compute(unsigned long **a, unsigned long **b, unsigned long **c,
             unsigned long **d, int N)
{
  int j, i;
  for (j = 1; j < N; j++) {
    /* Serial: S4 reads c[i+1][j], which S3 overwrites in iteration i+1,
       so the i loop carries an anti-dependence and cannot be parallelized
       as written (a parallelizable variant is sketched below). */
    for (i = 1; i < N; i++) {
      c[i][j] = 3 * d[i][j];   // S3
      d[i][j] = 2 * c[i+1][j]; // S4
    }
    /* Independent across i: S1 and S2 write only a[i][j] and b[i][j+1],
       and b is read from column j while written to column j+1. */
#pragma omp parallel for
    for (i = 1; i < N; i++) {
      a[i][j] = a[i][j] * b[i][j];         // S1
      b[i][j+1] = 2 * a[i][j] * c[i-1][j]; // S2
    }
  }
}
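One hypothetical way to parallelize the remaining serial loop (not from the original) is to snapshot the old values of column j before S3 overwrites them, which removes the anti-dependence. As in the original access pattern, c must provide rows up to index N:

#include <omp.h>
#include <stdlib.h>

/* Sketch: save the values of c[.][j] that S4 will read, then run both
   statements fully in parallel; serial semantics are preserved because
   S4 at iteration i reads the pre-loop value of c[i+1][j]. */
void compute_s3_s4(unsigned long **c, unsigned long **d, int N)
{
  unsigned long *c_old = malloc((size_t)(N + 1) * sizeof *c_old);
  int j, i;
  for (j = 1; j < N; j++) {
#pragma omp parallel for
    for (i = 2; i <= N; i++)      /* S4 reads c[2][j] .. c[N][j] */
      c_old[i] = c[i][j];
#pragma omp parallel for
    for (i = 1; i < N; i++) {
      c[i][j] = 3 * d[i][j];      // S3
      d[i][j] = 2 * c_old[i + 1]; // S4, uses the saved value
    }
  }
  free(c_old);
}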
trmv_x_sky_u_hi_conj.c
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { #ifdef COMPLEX const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT r = 0; r < m; ++r) { const ALPHA_INT row_start = A->pointers[r]; const ALPHA_INT row_end = A->pointers[r + 1]; ALPHA_INT row_indx = 1; for(ALPHA_INT i = row_start; i < row_end; i++) { ALPHA_INT row_eles = row_end - row_start; ALPHA_INT c = r - row_eles + row_indx; if(i == row_end - 1) { alpha_madde(y[r], alpha, x[c]); } else { ALPHA_Number t; alpha_mul_3c(t, alpha, A->values[i]); alpha_madde(y[r], t, x[c]); } row_indx ++; } } return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { #ifdef COMPLEX return ONAME_omp(alpha, A, x, beta, y); #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }
dnnl_quantize_v2-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file dnnl_quantize_v2-inl.h * \brief */ #ifndef MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_ #define MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_ #if MXNET_USE_ONEDNN == 1 #include <algorithm> #include <string> #include <vector> #include "../../nn/dnnl/dnnl_base-inl.h" #include "../quantize_v2-inl.h" namespace mxnet { namespace op { class SgDNNLQuantizeOperator { public: explicit SgDNNLQuantizeOperator(const nnvm::NodeAttrs& attrs) : param_(nnvm::get<QuantizeV2Param>(attrs.parsed)) {} void Forward(const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs); private: bool initalized_{false}; QuantizeV2Param param_; float cached_data_min_{0.f}; float cached_data_max_{0.f}; dnnl::memory::desc o_desc_; dnnl_args_map_t args_; std::shared_ptr<dnnl::reorder> fwd_pd_; }; void SgDNNLQuantizeOperator::Forward(const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { float quantized_range = 0.0; NDArray in_buffer = inputs[0]; float data_min = mshadow::red::limits::MaxValue<float>(); float data_max = mshadow::red::limits::MinValue<float>(); // Pass through quantized data if (inputs[0].dtype() == mshadow::kUint8 || inputs[0].dtype() == mshadow::kInt8) { if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) { *outputs[1].data().dptr<float>() = param_.min_calib_range.value(); *outputs[2].data().dptr<float>() = param_.max_calib_range.value(); } else { if (inputs[0].dtype() == mshadow::kUint8) { *outputs[1].data().dptr<float>() = 0; *outputs[2].data().dptr<float>() = kUint8Range; } else { *outputs[1].data().dptr<float>() = -kInt8Range; *outputs[2].data().dptr<float>() = kInt8Range; } } if (req[0] != kWriteInplace) { const_cast<NDArray&>(outputs[0]).CopyFrom(*inputs[0].GetDNNLData()); DNNLStream::Get()->Submit(); } } else { if (in_buffer.IsView() && in_buffer.IsDNNLData()) in_buffer = inputs[0].Reorder2Default(); auto i_mem = in_buffer.GetDNNLData(); if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) { data_min = param_.min_calib_range.value(); data_max = param_.max_calib_range.value(); } else { // no calib info in_buffer = inputs[0].Reorder2Default(); auto in_ptr = in_buffer.data().dptr<float>(); auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); std::vector<float> data_maxs(nthreads, data_max); std::vector<float> data_mins(nthreads, data_min); #pragma omp parallel for num_threads(nthreads) for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) { int tid = omp_get_thread_num(); if (in_ptr[i] > data_maxs[tid]) data_maxs[tid] = in_ptr[i]; if (in_ptr[i] < data_mins[tid]) 
data_mins[tid] = in_ptr[i]; } for (index_t i = 0; i < nthreads; i++) { if (data_maxs[i] > data_max) data_max = data_maxs[i]; if (data_mins[i] < data_min) data_min = data_mins[i]; } if (initalized_ && (cached_data_min_ != data_min || cached_data_max_ != data_max)) initalized_ = false; } // Write output min/max auto out_type = GetQuantizeOutputType(param_); if (out_type == mshadow::kUint8) { quantized_range = kUint8Range; *outputs[1].data().dptr<float>() = data_min; *outputs[2].data().dptr<float>() = data_max; } else if (out_type == mshadow::kInt8) { float real_range = MaxAbs(data_min, data_max); quantized_range = kInt8Range; *outputs[1].data().dptr<float>() = -real_range; *outputs[2].data().dptr<float>() = real_range; } else { LOG(FATAL) << "oneDNN quantize op only supports int8 and uint8 as output type"; } if (!initalized_) { cached_data_min_ = data_min; cached_data_max_ = data_max; float real_range = MaxAbs(data_min, data_max); float scale = quantized_range / real_range; dnnl::primitive_attr attr; const int mask = 0; std::vector<float> scales = {scale}; attr.set_output_scales(mask, scales); dnnl::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine(); auto i_desc = i_mem->get_desc(); size_t i_ndim = in_buffer.shape().ndim(); if (i_ndim == 4) { dnnl::memory::format_tag o_fmt = dnnl::memory::format_tag::nhwc; dnnl::memory::dims o_dims(i_desc.data.dims, i_desc.data.dims + i_desc.data.ndims); o_desc_ = dnnl::memory::desc(o_dims, get_dnnl_type(out_type), o_fmt); } else { o_desc_ = i_desc; o_desc_.data.data_type = get_dnnl_type_t(out_type); } auto reorder_pd = dnnl::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc_, attr); fwd_pd_ = std::make_shared<dnnl::reorder>(reorder_pd); initalized_ = true; } auto o_mem = CreateDNNLMem(outputs[0], o_desc_, req[0]); args_[DNNL_ARG_FROM] = *i_mem; args_[DNNL_ARG_TO] = *o_mem.second; DNNLStream::Get()->RegisterPrimArgs(*fwd_pd_, args_); CommitOutput(outputs[0], o_mem); DNNLStream::Get()->Submit(); } } static void SgDNNLQuantizeForward(const OpStatePtr& state_ptr, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { SgDNNLQuantizeOperator& op = state_ptr.get_state<SgDNNLQuantizeOperator>(); op.Forward(ctx, inputs, req, outputs); } } // namespace op } // namespace mxnet #endif // MXNET_USE_ONEDNN == 1 #endif // MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_
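// Standalone sketch (not oneDNN/MXNet code) of the symmetric int8 scaling the
// calibration path above feeds into the reorder primitive: real_range is
// MaxAbs(data_min, data_max) and scale = quantized_range / real_range. The
// constant below restates kInt8Range under the assumption that it equals
// 127.0f; the authoritative value lives in MXNet's quantization headers.
#if 0
#include <algorithm>
#include <cmath>
#include <cstdint>

static const float kInt8RangeEx = 127.0f;  // assumed stand-in for kInt8Range

static int8_t quantize_symmetric(float x, float data_min, float data_max) {
  const float real_range = std::max(std::fabs(data_min), std::fabs(data_max));
  const float scale = kInt8RangeEx / real_range;  // same scale passed to attr.set_output_scales
  const float q     = std::nearbyint(x * scale);  // round to the nearest step
  return static_cast<int8_t>(std::min(std::max(q, -kInt8RangeEx), kInt8RangeEx));
}
// e.g. data in [-2, 1]: real_range = 2, scale = 63.5, so x = 1.0f maps to 64
#endif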
LookupTable.h
#ifndef _LOOKUPTABLE_H_ #define _LOOKUPTABLE_H_ /* * LookupTable.h: * Lookup operation, for embeddings * * Created on: Apr 22, 2017 * Author: mszhang */ #include "SparseParam.h" #include "MyLib.h" #include "Alphabet.h" #include "Node.h" #include "Graph.h" #include "ModelUpdate.h" class LookupTable { public: PAlphabet elems; SparseParam E; bool bFineTune; int nDim; int nVSize; int nUNKId; public: LookupTable() { nVSize = 0; nDim = 0; elems = NULL; nUNKId = -1; bFineTune = false; } //random initialization inline void initial(PAlphabet alpha, int dim, bool fineTune = true) { elems = alpha; nVSize = elems->size(); nUNKId = elems->from_string(unknownkey); initialWeights(dim, fineTune); } //initialization by pre-trained embeddings inline bool initial(PAlphabet alpha, const string& inFile, bool fineTune = true, dtype norm = -1) { elems = alpha; nVSize = elems->size(); nUNKId = elems->from_string(unknownkey); return initialWeights(inFile, fineTune, norm); } inline void initialWeights(int dim, bool tune) { if (nVSize == 0 || (nVSize == 1 && nUNKId >= 0)) { std::cout << "please check the alphabet" << std::endl; return; } nDim = dim; E.initial(nDim, nVSize); E.val.random(sqrt(1.0 / nDim)); //E.val.norm2one(); bFineTune = tune; } // default should be fineTune, just for initialization inline bool initialWeights(const string& inFile, bool tune, dtype norm = -1) { if (nVSize == 0 || !elems->is_fixed() || (nVSize == 1 && nUNKId >= 0)) { std::cout << "please check the alphabet" << std::endl; return false; } ifstream inf; if (inf.is_open()) { inf.close(); inf.clear(); } inf.open(inFile.c_str()); if (!inf.is_open()) { std::cout << "please check the input file" << std::endl; return false; } string strLine, curWord; int wordId; vector<string> sLines; sLines.clear(); while (1) { if (!my_getline(inf, strLine)) { break; } if (!strLine.empty()) { sLines.push_back(strLine); } } inf.close(); if (sLines.size() == 0) { return false; } //find the first line, decide the wordDim; vector<string> vecInfo; split_bychar(sLines[0], vecInfo, ' '); nDim = vecInfo.size() - 1; E.initial(nDim, nVSize); E.val = 0; std::cout << "word embedding dim is " << nDim << std::endl; bool bHasUnknown = false; unordered_set<int> indexers; NRVec<dtype> sum(nDim); sum = 0.0; int count = 0; for (int idx = 0; idx < sLines.size(); idx++) { split_bychar(sLines[idx], vecInfo, ' '); if (vecInfo.size() != nDim + 1) { std::cout << "error embedding file" << std::endl; } curWord = vecInfo[0]; //we assume the keys are normalized wordId = elems->from_string(curWord); if (wordId >= 0) { count++; if (nUNKId == wordId) { bHasUnknown = true; } indexers.insert(wordId); for (int idy = 0; idy < nDim; idy++) { dtype curValue = atof(vecInfo[idy + 1].c_str()); sum[idy] += curValue; E.val[wordId][idy] += curValue; } } } if (count == 0) { E.val.random(sqrt(3.0 / nDim)); std::cout << "find no overlapped lexicons in the embedding file" << std::endl; return false; } if (nUNKId >= 0 && !bHasUnknown) { for (int idx = 0; idx < nDim; idx++) { E.val[nUNKId][idx] = sum[idx] / (count + 1); } indexers.insert(nUNKId); count++; std::cout << unknownkey << " not found, using averaged value to initialize." << std::endl; } int oovWords = 0; for (int id = 0; id < nVSize; id++) { if (indexers.find(id) == indexers.end()) { oovWords++; for (int idy = 0; idy < nDim; idy++) { E.val[id][idy] = nUNKId >= 0 ? 
E.val[nUNKId][idy] : sum[idy] / (count + 1); } } } std::cout << "OOV num is " << oovWords << ", total num is " << nVSize << ", embedding oov ratio is " << oovWords * 1.0 / nVSize << std::endl; std::cout << "unknown id" << nUNKId << std::endl; bFineTune = tune; if (norm > 0) { E.val.norm2one(norm); } return true; } inline void exportAdaParams(ModelUpdate& ada) { if (bFineTune) { ada.addParam(&E); } } inline int getElemId(const string& strFeat) { return elems->from_string(strFeat); } inline void save(std::ofstream &os) const { E.save(os); os << bFineTune << std::endl; os << nDim << std::endl; os << nVSize << std::endl; os << nUNKId << std::endl; } //set alpha directly inline void load(std::ifstream &is, PAlphabet alpha) { E.load(is); is >> bFineTune; is >> nDim; is >> nVSize; is >> nUNKId; elems = alpha; } }; class LookupNode : public Node { public: LookupTable* param; int xid; public: LookupNode() { xid = -1; param = NULL; node_type = "lookup"; } inline void setParam(LookupTable* paramInit) { param = paramInit; } inline void clearValue() { Node::clearValue(); xid = -1; } public: //notice the output //this should be leaf nodes void forward(Graph *cg, const string& strNorm) { assert(param != NULL); xid = param->getElemId(strNorm); if (xid < 0 && param->nUNKId >= 0) { xid = param->nUNKId; } if (param->bFineTune && xid < 0) { std::cout << "Caution: unknown words are not modeled !" << std::endl; } degree = 0; cg->addNode(this); } public: inline PExecute generate(bool bTrain, dtype cur_drop_factor); // better to rewrite for deep understanding inline bool typeEqual(PNode other) { bool result = Node::typeEqual(other); if (!result) return false; LookupNode* conv_other = (LookupNode*)other; if (param != conv_other->param) { return false; } return true; } // for which do no require merge public: void compute() { if (xid >= 0) { param->E.value(xid, val); } else { val.zero(); } } void backward() { assert(param != NULL); if (xid == param->nUNKId || (xid >= 0 && param->bFineTune)) { param->E.loss(xid, loss); } } }; class LookupExecute :public Execute { public: bool bTrain; public: inline void forward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->compute(); batch[idx]->forward_drop(bTrain, drop_factor); } } inline void backward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->backward_drop(); batch[idx]->backward(); } } }; inline PExecute LookupNode::generate(bool bTrain, dtype cur_drop_factor) { LookupExecute* exec = new LookupExecute(); exec->batch.push_back(this); exec->bTrain = bTrain; exec->drop_factor = cur_drop_factor; return exec; } #endif /*_LOOKUPTABLE_H*/
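// Standalone sketch (hypothetical types, not the NRVec/SparseParam API) of the
// OOV strategy used by initialWeights above: every vocabulary row the
// pretrained file did not cover -- including a missing UNK row -- is filled
// with the damped average sum[d] / (count + 1) of the rows it did cover.
#if 0
#include <cstddef>
#include <vector>

static void fill_oov_rows(std::vector<std::vector<float>>& E,
                          const std::vector<bool>& covered,
                          const std::vector<float>& sum, int count) {
    const int nDim = static_cast<int>(sum.size());
    for (std::size_t id = 0; id < E.size(); ++id) {
        if (covered[id]) continue;
        for (int d = 0; d < nDim; ++d)
            E[id][d] = sum[d] / (count + 1);  // same divisor as initialWeights
    }
}
#endif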
VecProductQuantizer.h
/** * Copyright (c) 2019-present, Nicolas Le Scouarnec * Copyright (c) 2018-present, Thomson Licensing, SAS. * Copyright (c) 2015-present, Facebook, Inc. * All rights reserved. * * Modifications related the introduction of Quicker ADC (Vectorized Product Quantization) * are licensed under the Clear BSD license found in the LICENSE file in the root directory * of this source tree. * * The rest of the source code is licensed under the BSD+Patents license found in the * LICENSE file in the root directory of this source tree */ // -*- c++ -*- #ifndef FAISS_VEC_PRODUCT_QUANTIZER_H #define FAISS_VEC_PRODUCT_QUANTIZER_H #include <stdint.h> #include <vector> #include "Clustering.h" #include "Heap.h" #include <cstddef> #include <cstring> #include <cstdio> #include <algorithm> #include "FaissAssert.h" #include "VectorTransform.h" #include "IndexFlat.h" #include "utils.h" #include <immintrin.h> #include <x86intrin.h> #include <iostream> #include <boost/align/aligned_allocator.hpp> #include <mkl.h> #include <iomanip> #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define FORCE_INLINE __attribute__((always_inline)) extern "C" { // this is to keep the clang syntax checker happy #ifndef FINTEGER #define FINTEGER int #endif #ifdef ECLIPSE #define __AVX512F__ #endif /* declare BLAS functions, see http://www.netlib.org/clapack/cblas/ */ int sgemm_ ( const char *transa, const char *transb, FINTEGER *m, FINTEGER * n, FINTEGER *k, const float *alpha, const float *a, FINTEGER *lda, const float *b, FINTEGER *ldb, float *beta, float *c, FINTEGER *ldc); } namespace faiss { static void init_hypercube (int d, int nbits, int n, const float * x, float *centroids) { std::vector<float> mean (d); for (int i = 0; i < n; i++) for (int j = 0; j < d; j++) mean [j] += x[i * d + j]; float maxm = 0; for (int j = 0; j < d; j++) { mean [j] /= n; if (fabs(mean[j]) > maxm) maxm = fabs(mean[j]); } for (int i = 0; i < (1 << nbits); i++) { float * cent = centroids + i * d; for (int j = 0; j < nbits; j++) cent[j] = mean [j] + (((i >> j) & 1) ? 1 : -1) * maxm; for (int j = nbits; j < d; j++) cent[j] = mean [j]; } } static void init_hypercube_pca (int d, int nbits, int n, const float * x, float *centroids) { PCAMatrix pca (d, nbits); pca.train (n, x); for (int i = 0; i < (1 << nbits); i++) { float * cent = centroids + i * d; for (int j = 0; j < d; j++) { cent[j] = pca.mean[j]; float f = 1.0; for (int k = 0; k < nbits; k++) cent[j] += f * sqrt (pca.eigenvalues [k]) * (((i >> k) & 1) ? 
1 : -1) * pca.PCAMat [j + k * d]; } } } /* * Epu16 "extended" intrisics * Public domain from http://www.alfredklomp.com/programming/sse-intrinsics/ */ static inline __m128i _mm_cmple_epu8 (__m128i x, __m128i y) { // Returns 0xFF where x <= y: return _mm_cmpeq_epi8(_mm_min_epu8(x, y), x); } static inline __m128i _mm_cmpge_epu8 (__m128i x, __m128i y) { // Returns 0xFF where x >= y: return _mm_cmple_epu8(y, x); } static inline __m128i _mm_cmpgt_epu8 (__m128i x, __m128i y) { // Returns 0xFF where x > y: return _mm_andnot_si128( _mm_cmpeq_epi8(x, y), _mm_cmpeq_epi8(_mm_max_epu8(x, y), x) ); } static inline __m128i _mm_cmplt_epu8 (__m128i x, __m128i y) { // Returns 0xFF where x < y: return _mm_cmpgt_epu8(y, x); } static inline __m128i _mm_cmple_epu16 (__m128i x, __m128i y) { // Returns 0xFFFF where x <= y: return _mm_cmpeq_epi16(_mm_subs_epu16(x, y), _mm_setzero_si128()); } static inline __m128i _mm_cmpge_epu16 (__m128i x, __m128i y) { // Returns 0xFFFF where x >= y: return _mm_cmple_epu16(y, x); } static inline __m128i _mm_cmpgt_epu16 (__m128i x, __m128i y) { // Returns 0xFFFF where x > y: return _mm_andnot_si128(_mm_cmpeq_epi16(x, y), _mm_cmple_epu16(y, x)); } static inline __m128i _mm_cmplt_epu16 (__m128i x, __m128i y) { // Returns 0xFFFF where x < y: return _mm_cmpgt_epu16(y, x); } /* * End of EPU16 extended intrisics */ template<class T_TSC> struct QuantizerMAX { std::vector<float> min; float min_sum; float max; int M; float delta; float inv_delta; //float gmin; //float gmin_sum; T_TSC QMAX; QuantizerMAX(float *min_,float min_sum_, float max_, int M_): min(), min_sum(min_sum_), max(max_), M(M_) { QMAX = std::numeric_limits<T_TSC>::max(); //printf("Building quantizer with T_TSC %d\n",std::numeric_limits<T_TSC>::max()); //gmin = std::numeric_limits<float>::max(); min.resize(M); for(int m=0;m<M;m++){ min[m]= min_[m]; FAISS_THROW_IF_NOT_MSG(max > min[m], "Max value to quantize must be larger than min value to quantize"); //gmin = std::min(gmin, min[m]); } //gmin_sum=gmin*M; FAISS_THROW_IF_NOT_MSG(max > min_sum, "Max value to quantize must be larger than min value to quantize"); delta = (max - min_sum) / QMAX; inv_delta = 1.0f/delta; //printf("[%f -- %f] (delta: %g, inv_delta: %g\n)",gmin,max,delta,inv_delta); } void quantize_val(float val, T_TSC* qval, int m) const { int pval = (val - min[m])*inv_delta; if(pval >= QMAX) { *qval = QMAX; return; }else{ *qval = static_cast<T_TSC>(pval); } } void quantize_sum(float val, T_TSC* qval) const { int pval = (val - min_sum)*inv_delta; if(pval >= QMAX) { *qval = QMAX; return; }else{ *qval = static_cast<T_TSC>(pval); } /*if(val >= max) { *qval = QMAX; return; } *qval = static_cast<T_TSC>(((val - min_sum)*inv_delta));*/ } float unquantize_sum(T_TSC qval) const { float fval=qval+0.5; return (fval*delta)+min_sum; } inline void quantize_val_simd(const float* val, int8_t* qval, const int table_size, const int ksub, const int m) const { FAISS_THROW_IF_NOT_MSG(ksub%16 == 0 && table_size%16 == 0 , "Requires table size and ksub to be multiples of of 16"); // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero const __m256 min_r = _mm256_set1_ps(min[m]); const __m256 inv_delta_r = _mm256_set1_ps(inv_delta); const __m128i shuf_r = _mm_set_epi8(15,14,13,12, 7,6,5,4 ,11,10,9,8, 3,2,1,0); for(int i=0;i<ksub/16;i++){ __m128i * t = (__m128i*)&qval[i*16]; float * f1 = (float*)&val[i*16]; float * f2 = (float*)&val[i*16+8]; __m256 low = _mm256_loadu_ps(f1); // 8x32 __m256 high = _mm256_loadu_ps(f2); // 8x32 low = _mm256_sub_ps(low, 
min_r); high = _mm256_sub_ps(high, min_r); low = _mm256_mul_ps(low, inv_delta_r); high = _mm256_mul_ps(high, inv_delta_r); __m256i lowi = _mm256_cvtps_epi32(low); __m256i highi = _mm256_cvtps_epi32(high); __m256i packed16_interleaved4 = _mm256_packs_epi32(lowi, highi); // A B A B __m128i p16i_l = _mm256_extracti128_si256(packed16_interleaved4,0); // A B __m128i p16i_h = _mm256_extracti128_si256(packed16_interleaved4,1); // A B __m128i packed8_interleaved4 = _mm_packs_epi16(p16i_l, p16i_h); // A B A B // Reorganize... __m128i packed8 = _mm_shuffle_epi8(packed8_interleaved4, shuf_r); // A A A A B B B B _mm_store_si128(t,packed8); } for(int i=ksub/16;i<table_size/16;i++){ // Set to zero __m128i * t = (__m128i*)&qval[i*16]; _mm_store_si128(t, _mm_set1_epi8(0)); } } inline void quantize_val_simd(const float* val, int16_t* qval, const int table_size, const int ksub, const int m) const { // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero FAISS_THROW_IF_NOT_MSG(ksub%16 == 0 && table_size%16 == 0, "Requires table size and ksub to be multiples of of 16"); //FIXME Update to use AVX512 // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero const __m256 min_r = _mm256_set1_ps(min[m]); const __m256 inv_delta_r = _mm256_set1_ps(inv_delta); for(int i=0;i<ksub/16;i++){ __m256i * t = (__m256i*)&qval[i*16]; float * f1 = (float*)&val[i*16]; float * f2 = (float*)&val[i*16+8]; __m256 low = _mm256_loadu_ps(f1); // 8x32 __m256 high = _mm256_loadu_ps(f2); // 8x32 low = _mm256_sub_ps(low, min_r); high = _mm256_sub_ps(high, min_r); low = _mm256_mul_ps(low, inv_delta_r); high = _mm256_mul_ps(high, inv_delta_r); __m256i lowi = _mm256_cvtps_epi32(low); __m256i highi = _mm256_cvtps_epi32(high); __m256i packed16_interleaved4 = _mm256_packs_epi32(lowi, highi); // A B A B __m256i packed16 = _mm256_permute4x64_epi64(packed16_interleaved4,0b11011000); // A A B B _mm256_store_si256(t,packed16); } for(int i=ksub/16;i<table_size/16;i++){ // Set to zero __m256i * t = (__m256i*)&qval[i*16]; _mm256_store_si256(t, _mm256_set1_epi16(0)); } } }; template<> struct QuantizerMAX<uint8_t> { std::vector<float> min; float min_sum; float max; int M; float delta; float inv_delta; //float gmin; //float gmin_sum; uint8_t QMAX; QuantizerMAX() {} virtual ~QuantizerMAX() {} QuantizerMAX(float *min_,float min_sum_, float max_, int M_): min(), min_sum(min_sum_), max(max_*1.001), M(M_) { QMAX = std::numeric_limits<uint8_t>::max(); //gmin = std::numeric_limits<float>::max(); min.resize(M); for(int m=0;m<M;m++){ min[m]= min_[m]; FAISS_THROW_IF_NOT_MSG(max > min[m], "Max value to quantize must be larger than min value to quantize"); //gmin = std::min(gmin, min[m]); } //gmin_sum=gmin*M; FAISS_THROW_IF_NOT_MSG(max > min_sum, "Max value to quantize must be larger than min value to quantize"); delta = (max - min_sum) / QMAX; inv_delta = 1.0f/delta; //printf("[%f -- %f] (delta: %g, inv_delta: %g\n)",gmin,max,delta,inv_delta); } virtual void quantize_val(float val, uint8_t* qval, int m) const { if(val >= max) { *qval = QMAX; return; } *qval = static_cast<uint8_t>(((val - min[m])*inv_delta)); } virtual void quantize_sum(float val, uint8_t* qval) const { if(val >= max) { *qval = QMAX; return; } *qval = static_cast<uint8_t>(((val - min_sum)*inv_delta)); } virtual float unquantize_sum(uint8_t qval) const { float fval=qval+0.5; return (fval*delta)+min_sum; } virtual inline void quantize_val_simd(const float* val, uint8_t* qval, const int table_size, const int ksub, const 
int m) const { FAISS_THROW_IF_NOT_MSG(ksub%16 == 0 && table_size%16 == 0 , "Requires table size and ksub to be multiples of of 16"); // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero const __m256 min_r = _mm256_set1_ps(min[m]); const __m256 inv_delta_r = _mm256_set1_ps(inv_delta); const __m128i shuf_r = _mm_set_epi8(15,14,13,12, 7,6,5,4 ,11,10,9,8, 3,2,1,0); for(int i=0;i<ksub/16;i++){ __m128i * t = (__m128i*)&qval[i*16]; float * f1 = (float*)&val[i*16]; float * f2 = (float*)&val[i*16+8]; __m256 low = _mm256_loadu_ps(f1); // 8x32 __m256 high = _mm256_loadu_ps(f2); // 8x32 low = _mm256_sub_ps(low, min_r); high = _mm256_sub_ps(high, min_r); low = _mm256_mul_ps(low, inv_delta_r); high = _mm256_mul_ps(high, inv_delta_r); __m256i lowi = _mm256_cvtps_epi32(low); __m256i highi = _mm256_cvtps_epi32(high); __m256i packed16_interleaved4 = _mm256_packs_epi32(lowi, highi); // A B A B __m128i p16i_l = _mm256_extracti128_si256(packed16_interleaved4,0); // A B __m128i p16i_h = _mm256_extracti128_si256(packed16_interleaved4,1); // A B __m128i packed8_interleaved4 = _mm_packus_epi16(p16i_l, p16i_h); // A B A B // Reorganize... __m128i packed8 = _mm_shuffle_epi8(packed8_interleaved4, shuf_r); // A A A A B B B B _mm_store_si128(t,packed8); } for(int i=ksub/16;i<table_size/16;i++){ // Set to zero __m128i * t = (__m128i*)&qval[i*16]; _mm_store_si128(t, _mm_setzero_si128()); } } }; template<> struct QuantizerMAX<uint16_t> { std::vector<float> min; float min_sum; float max; int M; float delta; float inv_delta; //float gmin; //float gmin_sum; uint16_t QMAX; QuantizerMAX(float *min_,float min_sum_, float max_, int M_): min(), min_sum(min_sum_), max(max_*1.001), M(M_) { QMAX = std::numeric_limits<uint16_t>::max(); //gmin = std::numeric_limits<float>::max(); min.resize(M); for(int m=0;m<M;m++){ min[m]= min_[m]; FAISS_THROW_IF_NOT_MSG(max > min[m], "Max value to quantize must be larger than min value to quantize"); //gmin = std::min(gmin, min[m]); } //gmin_sum=gmin*M; FAISS_THROW_IF_NOT_MSG(max > min_sum, "Max value to quantize must be larger than min value to quantize"); delta = (max - min_sum) / QMAX; inv_delta = 1.0f/delta; //printf("[%f -- %f] (delta: %g, inv_delta: %g\n)",gmin,max,delta,inv_delta); } void quantize_val(float val, uint16_t* qval, int m) const { if(val >= max) { *qval = QMAX; return; } *qval = static_cast<uint16_t>(((val - min[m])*inv_delta)); } void quantize_sum(float val, uint16_t* qval) const { if(val >= max) { *qval = QMAX; return; } *qval = static_cast<uint16_t>(((val - min_sum)*inv_delta)); } float unquantize_sum(uint16_t qval) const { float fval=qval+0.5; return (fval*delta)+min_sum; } #if 0 inline void quantize_val_simd(const float* val, uint16_t* qval, const int table_size, const int ksub, const int m) const { // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero FAISS_THROW_IF_NOT_MSG(ksub%16 == 0 && table_size%16 == 0, "Requires table size and ksub to be multiples of of 16"); // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero const __m512 min_r = _mm512_set1_ps(min[m]); const __m512 inv_delta_r = _mm512_set1_ps(inv_delta); for(int i=0;i<ksub/16;i++){ __m512i * t = (__m512i*)&qval[i*16]; float * f1 = (float*)&val[i*16]; __m512 all = _mm512_loadu_ps(f1); // 8x32 all = _mm512_sub_ps(all, min_r); all = _mm512_mul_ps(all, inv_delta_r); __m512i alli = _mm512_cvt_roundps_epi32(all,_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); __m256i lowi = _mm512_extract; 
__m256i highi = _mm512_extract; __m256i packed16_interleaved4 = _mm256_packs_epi32(lowi, highi); // A B A B __m256i packed16 = _mm256_permute4x64_epi64(packed16_interleaved4,0b11011000); // A A B B __m256i offset16 = _mm256_add_epi16(packed16, _mm256_set1_epi16(1)); _mm256_store_si256(t,offset16); } for(int i=ksub/16;i<table_size/16;i++){ // Set to zero __m256i * t = (__m256i*)&qval[i*16]; _mm256_store_si256(t, _mm256_set1_epi16(1)); } } #else inline void quantize_val_simd(const float* val, uint16_t* qval, const int table_size, const int ksub, const int m) const { // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero FAISS_THROW_IF_NOT_MSG(ksub%16 == 0 && table_size%16 == 0, "Requires table size and ksub to be multiples of of 16"); // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero const __m256 min_r = _mm256_set1_ps(min[m]); const __m256 inv_delta_r = _mm256_set1_ps(inv_delta); for(int i=0;i<ksub/16;i++){ __m256i * t = (__m256i*)&qval[i*16]; float * f1 = (float*)&val[i*16]; float * f2 = (float*)&val[i*16+8]; __m256 low = _mm256_loadu_ps(f1); // 8x32 __m256 high = _mm256_loadu_ps(f2); // 8x32 low = _mm256_sub_ps(low, min_r); high = _mm256_sub_ps(high, min_r); low = _mm256_mul_ps(low, inv_delta_r); high = _mm256_mul_ps(high, inv_delta_r); __m256i lowi = _mm256_cvtps_epi32(low); __m256i highi = _mm256_cvtps_epi32(high); __m256i packed16_interleaved4 = _mm256_packus_epi32(lowi, highi); // A B A B __m256i packed16 = _mm256_permute4x64_epi64(packed16_interleaved4,0b11011000); // A A B B _mm256_store_si256(t,packed16); } for(int i=ksub/16;i<table_size/16;i++){ // Set to zero __m256i * t = (__m256i*)&qval[i*16]; _mm256_store_si256(t, _mm256_setzero_si256()); } } #endif }; // Quantizer used for Qadc (query-time learned max and global_min) template<class T_TSC> struct QuantizerMAX_qadc : QuantizerMAX<T_TSC> { QuantizerMAX_qadc(float *min_,float min_sum_, float max_, int M_): QuantizerMAX<T_TSC>(min_,min_sum_, max_, M_) { // Override delta calculation with the more conservative delta of QuickADC QuantizerMAX<T_TSC>::delta = (max_ - min_[0]) / QuantizerMAX<T_TSC>::QMAX; QuantizerMAX<T_TSC>::inv_delta = 1.0f/QuantizerMAX<T_TSC>::delta; } }; #ifndef SWIG // Quantizer used for Bolt (trained-time learned max and global min) // Plus negative values quantized to zero. 
struct QuantizerMAX_bolt : QuantizerMAX<uint8_t> { float min_quant; float max_quant; int M; float delta; float inv_delta; uint8_t QMAX; QuantizerMAX_bolt(float min_q,float max_q, int M_): QuantizerMAX<uint8_t>(), min_quant(min_q), max_quant(max_q), M(M_) { QMAX = std::numeric_limits<uint8_t>::max(); FAISS_THROW_IF_NOT_MSG(max_quant > min_quant, "Max value to quantize must be larger than min value to quantize"); delta = (max_quant - min_quant) / QMAX; inv_delta = 1.0f/delta; //printf("[%f -- %f] (delta: %g, inv_delta: %g\n)",gmin,max,delta,inv_delta); } void quantize_val(float val, uint8_t* qval, int m) const { float pval = std::min(255.0f,inv_delta*std::max(0.0f,val-min_quant)); if(pval <= 0){ *qval = 0; }else if(val >= max_quant) { *qval = QMAX; }else{ *qval = static_cast<uint8_t>(pval); } } void quantize_sum(float val, uint8_t* qval) const { float pval = std::min(255.0f,inv_delta*std::max(0.0f,val-min_quant)); *qval = static_cast<uint8_t>(pval); } float unquantize_sum(uint8_t qval) const { float fval=qval+0.5; return (fval*delta)+min_quant; } void quantize_sum16(float val, int16_t* qval) const { float pval = std::min(32767.0f,inv_delta*std::max(0.0f,val-M*min_quant)); *qval = static_cast<int16_t>(pval); } float unquantize_sum16(int16_t qval) const { float fval=qval+0.5; return (fval*delta)+M*min_quant; } inline void quantize_val_simd(const float* val, uint8_t* qval, const int table_size, const int ksub, const int m) const { FAISS_THROW_IF_NOT_MSG(ksub%16 == 0 && table_size%16 == 0 , "Requires table size and ksub to be multiples of of 16"); // Set values 0 to ksub to their quantized values and values table_size to ksub-1, if any, to zero const __m256 min_r = _mm256_set1_ps(min_quant); const __m256 inv_delta_r = _mm256_set1_ps(inv_delta); const __m128i shuf_r = _mm_set_epi8(15,14,13,12, 7,6,5,4 ,11,10,9,8, 3,2,1,0); for(int i=0;i<ksub/16;i++){ __m128i * t = (__m128i*)&qval[i*16]; float * f1 = (float*)&val[i*16]; float * f2 = (float*)&val[i*16+8]; __m256 low = _mm256_loadu_ps(f1); // 8x32 __m256 high = _mm256_loadu_ps(f2); // 8x32 low = _mm256_sub_ps(low, min_r); high = _mm256_sub_ps(high, min_r); low = _mm256_mul_ps(low, inv_delta_r); high = _mm256_mul_ps(high, inv_delta_r); __m256i lowi = _mm256_cvtps_epi32(low); __m256i highi = _mm256_cvtps_epi32(high); __m256i packed16_interleaved4 = _mm256_packs_epi32(lowi, highi); // A B A B __m128i p16i_l = _mm256_extracti128_si256(packed16_interleaved4,0); // A B __m128i p16i_h = _mm256_extracti128_si256(packed16_interleaved4,1); // A B __m128i packed8_interleaved4 = _mm_packus_epi16(p16i_l, p16i_h); // A B A B // Reorganize... __m128i packed8 = _mm_shuffle_epi8(packed8_interleaved4, shuf_r); // A A A A B B B B _mm_store_si128(t,packed8); } for(int i=ksub/16;i<table_size/16;i++){ // Set to zero __m128i * t = (__m128i*)&qval[i*16]; _mm_store_si128(t, _mm_set1_epi8(0)); } } }; #endif /** * Vectorized Product Quantizer. Implemented only for METRIC_L2 * This is a templated class supporting different SIMD width, and supporting different type of quantizers. * A few example instantiations are given bellow. 
 * * VecProductQuantizer<12,3,6,6,4,0,uint16_t,__m512i,__m512i> // AVX512 - 16 bits distances * VecProductQuantizer<12,3,5,5,5,0,uint16_t,__m512i,__m512i> // AVX512 - 16 bits distances * VecProductQuantizer<12,3,6,5,5,0,uint16_t,__m512i,__m512i> // AVX512 - 16 bits distances * VecProductQuantizer<16,2,4,4,0,0,uint8_t,__m128i,__m128i> // SSE4.2 - 8 bits distances * VecProductQuantizer<16,2,4,4,0,0,int8_t,_mm128i,__m256i> // AVX256 - 8 bits distances (AVX 256 <- use two 128-bit lanes) * VecProductQuantizer<16,2,4,4,0,0,int8_t,_mm128i,__m512i> // AVX512 - 8 bits distances (AVX 512 <- use four 128-bit lanes) * VecProductQuantizer<8,1,8,0,0,0,int8_t,_mm512i,__m512i> // AVX512 - 8 bits distances (256 element table) * * Note, it is preferred to put {6,6,4} for the quantizer specification, rather than {4,6,6}, due to the way remaining dsubs are allocated. * * * @tparam T_M Number of sub_quantizers * @tparam T_P Number of subquantizer codes that can be stored in a single SIMD element (epi16 or epi8) * @tparam T_PG_0 Layout in terms of bits between subquantizers within a SIMD element (array whose values sum to the width of a single element) * @tparam T_PG_1 Layout in terms of bits between subquantizers within a SIMD element (array whose values sum to the width of a single element) * @tparam T_PG_2 Layout in terms of bits between subquantizers within a SIMD element (array whose values sum to the width of a single element) * @tparam T_PG_3 Layout in terms of bits between subquantizers within a SIMD element (array whose values sum to the width of a single element) * @tparam T_TSC Basic type corresponding to elements (uint16_t for epi16) * @tparam T_TSCMM SIMD type desired (__m512i for AVX-512) * @tparam T_TSCMMXL Specification of the cross-lane type (e.g., AVX-256 works as two independent 128-bit lanes, other examples use a single 128 or 512-bit lane). */ template<int T_M,int T_P,int T_PG_0, int T_PG_1, int T_PG_2, int T_PG_3, class T_TSC, class T_TSCMM, class T_TSCMMXL> struct AbstractVecProductQuantizer { typedef CMax<float, long> CHeap; public: static constexpr size_t M = T_M; ///< number of subquantizers // Variants: // A: 6,6,4 = 16 bits // B: 5,5,5 = 15 bits + 1 bit padding // C: 5,5,6 = 16 bits // D: 4,4 (AVX 256) // => Repeated M times.
// Interleaving static constexpr size_t simd_lanes = sizeof(T_TSCMM)/sizeof(T_TSC); static constexpr size_t simd_lane_width_bits = 8*sizeof(T_TSC); static constexpr size_t simd_width_bits = simd_lanes * simd_lane_width_bits; // 512 static constexpr size_t simd_width_bytes = simd_width_bits / 8; static constexpr size_t simd_lane_width_bytes = simd_lane_width_bits / 8; // Derived values static constexpr size_t subcodes_per_lane = T_P; static constexpr size_t lanes_per_code = T_M/subcodes_per_lane; static constexpr size_t codes_per_group = simd_lanes; //static constexpr size_t group_size_bytes = lanes_per_code * codes_per_group * simd_lane_width_bytes; static constexpr size_t cross_lane=sizeof(T_TSCMMXL)/sizeof(T_TSCMM); struct group { union { T_TSC c[lanes_per_code*codes_per_group] ; T_TSCMM mm[lanes_per_code]; T_TSCMMXL mmxl[lanes_per_code/cross_lane]; }; }; static_assert(sizeof(T_TSC)*lanes_per_code*codes_per_group == sizeof(T_TSCMM)*lanes_per_code, "Incorrect layout specification"); static_assert(sizeof(T_TSC)*lanes_per_code*codes_per_group == sizeof(T_TSCMMXL)*lanes_per_code/cross_lane, "Incorrect layout specification"); size_t d; ///< size of the input vectors // Indicator for initial scan (consumed by IndexVPQ, used for overwriting the default value for Bolt: Bolt uses 0) int initial_scan_estim_parameter=4; int get_initial_scan_estim_parameter(){ return initial_scan_estim_parameter; } // Layout (example for AVX512 - 16 bit distances) // a1a2a3 b1b2b3 c1c2c3 ... z1z2z3 = 16 bits x 32 = 512 bits // a4a5a6 b4b5b6 c4c5c6 ... z4z5z6 = 16 bits x 32 = 512 bits // a7a8a9 b7b8b9 c7c8c9 ... z7z8z9 = 16 bits x 32 = 512 bits // axaxax bxbxbx cxcxcx ... zxzxzx = 16 bits x 32 = 512 bits // Group : 4 (4x3 = 12 subquantizers) // Layout (example for SSE 4.2 - 8 bit distances) // a1a2 b1b2 c1c2 ... p1p2 = 8 bits x 16 = 128 bits // a3a4 b3b4 c3c4 ... p3p4 = 8 bits x 16 = 128 bits // ... // axax bxbx cxcx ... pxpx = 8 bits x 16 = 128 bits // Group : 8 (8x2=16 subquantizers) // Offset for accessing/reconstructing real vectors. size_t dsub[T_M]; ///< dimensionality of each subvector size_t dsub_offset[T_M]; size_t dsub_total; // Total uncompressed = 128 // * compressed: ceil( 128 * x*6/(x*6 + y*4))=a => encode to y vectors of a/y (rest on last) // * compressed: 128-a => encode to z vectors of b/z (rest on last) // Variables for accessing the packed sub-quantizer codes size_t csub_nbits[T_M]; size_t csub_register[T_M]; size_t csub_mask_inlane[T_M]; // (mask with offset already applied) size_t csub_offset_inlane[T_M]; size_t ksub[T_M]; // Number of centroids for each subquantizer size_t ksub_offset[T_M]; size_t ksub_total; size_t ksub_sq[T_M]; // Squared number of centroids for each subquantizer (SDC table block size) size_t ksub_sq_offset[T_M]; size_t ksub_sq_total; size_t dt_lanes[T_M/cross_lane]; // Number of lanes needed for holding distance table (1 or 2) size_t dt_lanes_offset[T_M/cross_lane]; // Offset of given lanes size_t dt_lanes_total; // Total number of lanes // values derived from the above //size_t byte_per_idx; ///< nb bytes per code component (1 or 2) //size_t code_size; ///< byte per indexed vector bool verbose; ///< verbose during training? bool ultra_verbose; ///< verbose during setup?
/// initialization enum train_type_t { Train_default, Train_hot_start, ///< the centroids are already initialized Train_shared, ///< share dictionary across PQ segments Train_hypercube, ///< initialize centroids with nbits-D hypercube Train_hypercube_pca, ///< initialize centroids with nbits-D hypercube }; train_type_t train_type; ClusteringParameters cp; ///< parameters used during clustering /// Centroid table, size Sum(ksub[i]*dsub[i]) std::vector<float> centroids; size_t centroids_offset[T_M]; size_t centroids_size[T_M]; size_t centroids_total_size; // Sum_i ( ksub[i] * dsub[i]) -- use as a matrix size /// return the centroids associated with subvector m float * get_centroids (size_t m, size_t i) { return &centroids[centroids_offset[m] + dsub[m]*i]; } const float * get_centroids (size_t m, size_t i) const { return &centroids[centroids_offset[m] + dsub[m]*i]; } // Train the product quantizer on a set of points. A clustering // can be set on input to define non-default clustering parameters virtual void train (int n, const float *x) { if (train_type != Train_shared) { train_type_t final_train_type; final_train_type = train_type; if (train_type == Train_hypercube || train_type == Train_hypercube_pca) { if (d < T_M/T_P*8*sizeof(T_TSC)) { final_train_type = Train_default; printf ("cannot train hypercube: nbits=%ld > log2(d=%ld)\n", T_M/T_P*8*sizeof(T_TSC), d); } } for (int m = 0; m < M; m++) { float * xslice = new float[n * dsub[m]]; ScopeDeleter<float> del (xslice); for (int j = 0; j < n; j++) memcpy (xslice + j * dsub[m], x + j * d + dsub_offset[m], dsub[m] * sizeof(float)); Clustering clus (dsub[m], ksub[m], cp); clus.verbose = this->verbose; // we have some initialization for the centroids if (final_train_type != Train_default) { clus.centroids.resize (dsub[m] * ksub[m]); } switch (final_train_type) { case Train_hypercube: init_hypercube (dsub[m],csub_nbits[m] , n, xslice, clus.centroids.data ()); break; case Train_hypercube_pca: init_hypercube_pca (dsub[m], csub_nbits[m], n, xslice, clus.centroids.data ()); break; case Train_hot_start: memcpy (clus.centroids.data(), get_centroids (m, 0), dsub[m] * ksub[m] * sizeof (float)); break; default: ; } if(verbose) { clus.verbose = true; printf ("Training PQ slice %d/%zd\n", m, M); } IndexFlatL2 index (dsub[m]); clus.train (n, xslice, index); set_params (clus.centroids.data(), m); } } else { FAISS_THROW_IF_NOT (false); #if 0 // This code path has not been updated for PQ with sub quantizers of unequal width.
Clustering clus (dsub, ksub, cp); if(verbose) { clus.verbose = true; printf ("Training all PQ slices at once\n"); } IndexFlatL2 index (dsub); clus.train (n * M, x, index); for (int m = 0; m < M; m++) { set_params (clus.centroids.data(), m); } #endif } } AbstractVecProductQuantizer(size_t d) : /* dimensionality of the input vectors */ d(d) { set_derived_values(); } AbstractVecProductQuantizer () : d(0) { set_derived_values(); } virtual ~AbstractVecProductQuantizer () { } inline int t_pg(int i){ switch(i){ case 0: return T_PG_0; case 1: return T_PG_1; case 2: return T_PG_2; case 3: return T_PG_3; default: FAISS_THROW_MSG("UNSUPPORT T_P SPECIFICATION"); } } inline int t_pgp(int i){ //return std::pow((double)t_pg(i),(double)1.25); return t_pg(i); } /* Number of groups needed for storing n vectors */ static size_t nb_groups(size_t n){ return (n+codes_per_group-1)/codes_per_group; } /// compute derived values when d, M and nbits have been set void set_derived_values () { verbose = false; ultra_verbose = false; // quite a few derived values // FAISS_THROW_IF_NOT (d % M == 0); // To support 12 sub quantizers, we have to deal with such case. FAISS_THROW_IF_NOT (d % lanes_per_code == 0); size_t dsub_per_lane = d / lanes_per_code; if(ultra_verbose) std::cout << "d " << d << std::endl; size_t bits_per_lane = t_pgp(0)+t_pgp(1)+t_pgp(2)+t_pgp(3); //size_t bits_per_lane = T_PG_0 + T_PG_1 + T_PG_2 + T_PG_3; size_t dsub_per_bit = dsub_per_lane / bits_per_lane; size_t dsub_lane[T_P]; size_t consumed_dsubs=0; for(size_t i=0;i<T_P;i++){ dsub_lane[i]=dsub_per_bit * t_pgp(i); // dsub_lane[i]=dsub_per_bit * t_pg(i); consumed_dsubs += dsub_lane[i]; } // Assign remaining dsubs for(size_t i=0; consumed_dsubs < dsub_per_lane ; i=(i+1)%T_P){ dsub_lane[i] += 1; consumed_dsubs += 1; } if(ultra_verbose){ for(size_t i=0;i<T_P;i++){ std::cout << "dsub_lane[" << i<<"] " << dsub_lane[i] << std::endl; } } // Compute the dsub_offsets; size_t current_centroid_offset_inlane=0; dsub_total=0; ksub_total=0; dt_lanes_total=0; ksub_sq_total=0; centroids_total_size=0; for(size_t i=0,j=0;i<M;i++,j=i%T_P){ dsub[i]=dsub_lane[j]; dsub_offset[i]=dsub_total; dsub_total+=dsub[i]; ksub[i]=1 << t_pg(j); ksub_offset[i]=ksub_total; ksub_total+=ksub[i]; ksub_sq[i]=ksub[i]*ksub[i]; ksub_sq_offset[i]=ksub_sq_total; ksub_sq_total+=ksub_sq[i]; centroids_offset[i]=centroids_total_size; centroids_size[i]=dsub[i]*ksub[i]; centroids_total_size+=dsub[i]*ksub[i]; csub_nbits[i] = t_pg(j); csub_register[i] = i/T_P; csub_offset_inlane[i] = current_centroid_offset_inlane; csub_mask_inlane[i] = ((1 << t_pg(j)) - 1) << current_centroid_offset_inlane; current_centroid_offset_inlane= j == T_P-1 ? 
0 : current_centroid_offset_inlane + t_pg(j); if(ultra_verbose){ std::cout << "dsub[" << i << "] " << dsub[i] << std::endl; std::cout << "dsub_offset[" << i << "]" << dsub_offset[i] << std::endl; std::cout << "ksub[" << i << "] " << ksub[i] << std::endl; std::cout << "ksub_offset[" << i << "] " << ksub_offset[i] << std::endl; std::cout << "ksub_sq[" << i << "] " << ksub_sq[i] << std::endl; std::cout << "ksub_sq_offset[" << i << "] " << ksub_sq_offset[i] << std::endl; std::cout << "centroids_offset[" << i << "] " << centroids_offset[i] << std::endl; std::cout << "centroids_size[" << i << "] " << centroids_size[i] << std::endl; std::cout << "csub_nbits[" << i << "] " << csub_nbits[i] << std::endl; std::cout << "csub_register[" << i << "] " << csub_register[i] << std::endl; std::cout << "csub_offset_inlane[" << i << "] " << csub_offset_inlane[i] << std::endl; std::cout << "csub_mask_inlane[" << i << "] " << std::hex << csub_mask_inlane[i] << std::dec << std::endl; } } for(size_t i=0;i<M/T_P/cross_lane;i++){ for(size_t j=0;j<T_P;j++){ dt_lanes[i*T_P+j]=(ksub[i*cross_lane*T_P+j]+simd_lanes-1)/simd_lanes*cross_lane; dt_lanes_offset[i*T_P+j]=dt_lanes_total; dt_lanes_total+=dt_lanes[i*T_P+j]; } if(ultra_verbose){ for(size_t j=0;j<T_P;j++){ std::cout << "dt_lanes[" << i*T_P+j << "] " << dt_lanes[i*T_P+j] << std::endl; std::cout << "dt_lanes_offset[" << i*T_P+j << "] " << dt_lanes_offset[i*T_P+j] << std::endl; } } } //byte_per_idx = (nbits + 7) / 8; //code_size = byte_per_idx * M; if(ultra_verbose){ std::cout << "dsub_total: " << dsub_total << std::endl; std::cout << "ksub_total " << ksub_total << std::endl; std::cout << "ksub_sq_total " << ksub_sq_total << std::endl; std::cout << "centroids_total_size " << centroids_total_size << std::endl; std::cout << "dt_lanes_total " << dt_lanes_total << std::endl; } centroids.resize(centroids_total_size); train_type = Train_default; } /// Define the centroids for subquantizer m void set_params (const float * centroids_, int m){ memcpy (get_centroids(m, 0), centroids_, ksub[m] * dsub[m] * sizeof (centroids_[0])); } inline void set_code_component(group * codes, size_t idx, size_t m, unsigned val) const { /* Merge with other subquantizer indexes */ int group_index = idx / codes_per_group; int index_in_group = idx % codes_per_group; T_TSC c = codes[group_index].c[codes_per_group*csub_register[m]+index_in_group]; c = c & ~csub_mask_inlane[m]; c = c | (val << csub_offset_inlane[m]); codes[group_index].c[codes_per_group*csub_register[m]+index_in_group] = c; } inline unsigned get_code_component(const group * codes, size_t idx, size_t m) const { /* Merge with other subquantizer indexes */ int group_index = idx / codes_per_group; int index_in_group = idx % codes_per_group; T_TSC c = codes[group_index].c[codes_per_group*csub_register[m]+index_in_group]; return (c & csub_mask_inlane[m]) >> csub_offset_inlane[m]; } /// Quantize one vector with the product quantizer void encode (const float * x, group * codes, size_t idx) const { for (size_t m = 0; m < M; m++) { float distances [ksub[m]]; float mindis = 1e20; unsigned idxm = 0; const float * xsub = x + dsub_offset[m]; fvec_L2sqr_ny (distances, xsub, get_centroids(m, 0), dsub[m], ksub[m]); /* Find best centroid */ size_t i; for (i = 0; i < ksub[m]; i++) { float dis = distances [i]; if (dis < mindis) { mindis = dis; idxm = i; } } set_code_component(codes, idx, m, idxm); } } /// same as compute_code for several vectors void encode_multiple (const float * x, group * codes, size_t offset_idx, size_t n) const { if (d/M < 16) { // 
simple direct computation #pragma omp parallel for for (size_t i = 0; i < n; i++) encode(x + i * d, codes, i+offset_idx); } else { // worthwile to use BLAS float *dis_tables = new float [n * ksub_total]; ScopeDeleter<float> del (dis_tables); compute_distance_tables (n, x, dis_tables); #pragma omp parallel for for (size_t i = 0; i < n; i++) { const float * tab = dis_tables + i * ksub_total; encode_from_distance_table (tab, codes, i+offset_idx); } } } void copy_code(group * codes_dst, size_t offset_dst, const group* codes_src, size_t offset_src, size_t n) { for(size_t i=0;i<n;i++){ for(size_t m=0;m < M; m++){ set_code_component(codes_dst, i+offset_dst, m, get_code_component(codes_src, i+offset_src, m)); } } } void append_codes(std::vector<group, boost::alignment::aligned_allocator<group, 64>>& codes_dst,size_t * count_dst, const group* codes_src, size_t offset_src, size_t n) { if(codes_dst.size() < nb_groups(n+*count_dst)) codes_dst.resize(nb_groups(n+*count_dst)); copy_code(codes_dst.data(),*count_dst,codes_src,offset_src,n); (*count_dst)+=n; } /// decode a vector from a given code (or n vectors if third argument) void decode(const group *codes, float *x, size_t idx) const{ for (size_t m = 0; m < M; m++) { memcpy(x + dsub_offset[m], get_centroids(m, get_code_component(codes, idx, m)), sizeof(float) * dsub[m]); } } void decode_multiple(const group *codes, float *x, size_t offset_idx, size_t n) const{ for (size_t i = 0; i < n; i++) { this->decode(codes, x + d * i, i+offset_idx); } } /// If we happen to have the distance tables precomputed, this is /// more efficient to compute the codes. inline void encode_from_distance_table (const float *tab, group *codes, size_t idx) const { for (size_t m = 0; m < M; m++) { float mindis = 1e20; unsigned idxm = 0; /* Find best centroid */ for (size_t j = 0; j < ksub[m]; j++) { float dis = *tab++; if (dis < mindis) { mindis = dis; idxm = j; } } set_code_component(codes, idx, m, idxm); } } /** Compute distance table for one vector. * * The distance table for x = [x_0 x_1 .. x_(M-1)] is a M * ksub * matrix that contains * * dis_table (m, j) = || x_m - c_(m, j)||^2 * for m = 0..M-1 and j = 0 .. ksub - 1 * * where c_(m, j) is the centroid no j of sub-quantizer m. 
* * @param x input vector size d * @param dis_table output table, size M * ksub */ void compute_distance_table (const float * x, float * dis_table) const { size_t m; for (m = 0; m < M; m++) { fvec_L2sqr_ny (dis_table + ksub_offset[m], x + dsub_offset[m], get_centroids(m, 0), dsub[m], ksub[m]); } } void compute_inner_prod_table (const float * x, float * dis_table) const { size_t m; for (m = 0; m < M; m++) { fvec_inner_products_ny (dis_table + ksub_offset[m], x + dsub_offset[m], get_centroids(m, 0), dsub[m], ksub[m]); } } /** compute distance table for several vectors * @param nx nb of input vectors * @param x input vector size nx * d * @param dis_table output table, size nx * M * ksub */ void compute_distance_tables (size_t nx, const float * x, float * dis_tables) const { if ( d/M < 16) { #pragma omp parallel for for (size_t i = 0; i < nx; i++) { compute_distance_table (x + i * d, dis_tables + i * ksub_total); } } else { // use BLAS for (int m = 0; m < M; m++) { pairwise_L2sqr (dsub[m], nx, x + dsub_offset[m], ksub[m], centroids.data() + centroids_offset[m], dis_tables + ksub_offset[m], d, dsub[m], ksub_total); } } } void compute_inner_prod_tables (size_t nx, const float * x, float * dis_tables) const{ if ( d/M < 16) { #pragma omp parallel for for (size_t i = 0; i < nx; i++) { compute_inner_prod_table (x + i * d, dis_tables + i * ksub_total); } } else { // use BLAS // compute distance tables for (int m = 0; m < M; m++) { FINTEGER ldc = ksub_total, nxi = nx, ksubi = ksub[m], dsubi = dsub[m], di = d; float one = 1.0, zero = 0; sgemm_ ("Transposed", "Not transposed", &ksubi, &nxi, &dsubi, &one, &centroids [centroids_offset[m]], &dsubi, x + dsub_offset[m], &di, &zero, dis_tables + ksub_offset[m], &ldc); } } } inline void lookup_and_update_heap(size_t ncodes, size_t offset, const group * codes, const float * dis_table, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const { for (size_t j = 0; j < ncodes; j++) { float dis = 0; for (size_t m = 0; m < M; m+=1) { int c = get_code_component(codes, offset+j, m); dis += dis_table[ksub_offset[m] + c]; } long id; if(store_pairs){ id = (key << 32 | (j+offset)); }else{ id = list_ids[j+offset]; } heap_pushpop<CHeap>(k, heap_dis, heap_ids, dis+dis0, id); } } typedef QuantizerMAX<T_TSC> VPQQuant; typedef T_TSCMM QuantTableLane; // Protected for use by subclasses in quantize tables virtual void build_mm_tables(const float* dis_table, T_TSCMM* mm_dis_tables, QuantizerMAX<T_TSC> * quant) const{ // Get pointer to primitive type array T_TSC* q_dis_tables = (T_TSC*)mm_dis_tables; for(int il=0;il<lanes_per_code/cross_lane;il++){ // For each group of lanes (T_M/T_P/T_XL) for(int j=0;j<subcodes_per_lane;j++){ // For each sub-code within lane (T_P) //T_TSC* qt = q_dis_tables[dt_lanes_offset[il/T_XL*subcodes_per_lane+j]*T_LANES]; // Lane(s) at position (il*T_XL*T_P + j*T_XL + i ) will hold table for quantizer (il*T_XL*T_P + i*T_P + j) // A distance table may require multiple SIMD lanes... cross lane has the priority in that case... 
(poorly tested) int lane_group_pos = il*T_P+j; int lane_offset = dt_lanes_offset[lane_group_pos]*simd_lanes; int lane_per_table= dt_lanes[lane_group_pos]/cross_lane; for(int l=0;l<lane_per_table;l++){ for(int n=0;n<cross_lane;n++){ #define SIMD_QUANT #ifdef SIMD_QUANT int lane_offset_f = lane_offset + l*cross_lane*simd_lanes + n*simd_lanes; int m = (il*cross_lane+n)*subcodes_per_lane+j; int ksub_offset_r = ksub_offset[m]+l*simd_lanes; int ksub_remaining = ksub[m]-l*simd_lanes; quant->quantize_val_simd(&dis_table[ksub_offset_r], (T_TSC*)&q_dis_tables[lane_offset_f], simd_lanes, ksub_remaining, m); #else for(int o=0;o<simd_lanes;o++){ int ix = l*simd_lanes+o; int lane_offset_f = lane_offset + l*cross_lane*simd_lanes + n*simd_lanes+o; int m = (il*cross_lane+n)*subcodes_per_lane+j; if(ix < ksub[m]){ qmax->quantize_val(dis_table[ksub_offset[m]+ix], &q_dis_tables[lane_offset_f],m); }else{ q_dis_tables[lane_offset_f]=0; } } #endif } } } } } virtual VPQQuant * quantize_tables(const float* dis_table, QuantTableLane* mm_dis_tables, float max) const{ /* Find the global per-table minimum */ float gmin_global=std::numeric_limits<float>::infinity(); float gmin_sum=0.0; float gmin_current[T_M]; for(size_t m=0;m<T_M;m++){ gmin_current[m]=std::numeric_limits<float>::infinity(); for(size_t i=0;i<ksub[m];i++){ gmin_current[m]=std::min(dis_table[ksub_offset[m]+i],gmin_current[m]); gmin_global=std::min(dis_table[ksub_offset[m]+i],gmin_global); } gmin_sum+=gmin_current[m]; } if(gmin_sum >= max){ /* We cannot build a quantizer as all distances quantize to infinite, the whole cell/inverted list can be skipped */ return nullptr; }else{ /* We can build a quantizer, we will need to scan the inverted list/cell */ VPQQuant * quant = new VPQQuant(gmin_current,gmin_sum,max,T_M); this->build_mm_tables(dis_table, mm_dis_tables, quant); return quant; } } virtual inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * codes, const float * dis_table, T_TSCMM* mm_dis_tables, VPQQuant* qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const { FAISS_THROW_MSG("Incomplete implementation."); } /** perform a search (L2 distance) * @param x query vectors, size nx * d * @param nx nb of queries * @param codes database codes, size ncodes * byte_per_idx * @param ncodes nb of nb vectors * @param res heap array to store results (nh == nx) * @param init_finalize_heap initialize heap (input) and sort (output)? 
*/ void search(const float * x, size_t nx, const group * codes, const size_t ncodes, float_maxheap_array_t *res, bool init_finalize_heap = true, float initial_scan_estim_param = 4) const { size_t k = res->k; FAISS_THROW_IF_NOT (nx == res->nh); float * dis_tables = new float [nx * ksub_total]; ScopeDeleter<float> del(dis_tables); compute_distance_tables (nx, x, dis_tables); #pragma omp parallel for for (size_t i = 0; i < nx; i++) { /* query preparation for asymmetric search: compute look-up tables */ const float* dis_table = dis_tables + i * ksub_total; float dis0=0; /* Compute distances and keep smallest values */ long * __restrict heap_ids = res->ids + i * k; float * __restrict heap_dis = res->val + i * k; if (init_finalize_heap) { heap_heapify<CHeap> (k, heap_dis, heap_ids); } const size_t codes_scanned_to_build_quantizer = std::min(ncodes,static_cast<unsigned long>(k*initial_scan_estim_param)); lookup_and_update_heap(codes_scanned_to_build_quantizer, 0, codes, dis_table,k, heap_dis, heap_ids, dis0,0 , NULL, 1); std::vector<QuantTableLane,boost::alignment::aligned_allocator<QuantTableLane, 64>> mm_dis_tables; mm_dis_tables.resize(dt_lanes_total); VPQQuant * qmax = quantize_tables(dis_table, mm_dis_tables.data(), heap_dis[0]); if(qmax != nullptr){ lookup_and_update_heap_simd(ncodes-codes_scanned_to_build_quantizer, codes_scanned_to_build_quantizer, codes, dis_table, mm_dis_tables.data(), qmax, k, heap_dis, heap_ids, dis0,0 , NULL, 1); delete qmax; } if (init_finalize_heap) { heap_reorder<CHeap> (k, heap_dis, heap_ids); } } } static inline float sqr (float x) { return x * x; } /** same as search, but with inner product similarity */ void search_ip (const float * x, size_t nx, const group * codes, const size_t ncodes, float_minheap_array_t *res, bool init_finalize_heap = true) const{ size_t k = res->k; FAISS_THROW_MSG("Not implemented -- need to make it more generic to use a lookup_and_update which would use CMin."); FAISS_THROW_IF_NOT (nx == res->nh); float * dis_tables = new float [nx * ksub_total]; ScopeDeleter<float> del(dis_tables); compute_inner_prod_tables (nx, x, dis_tables); #pragma omp parallel for for (size_t i = 0; i < nx; i++) { /* query preparation for asymmetric search: compute look-up tables */ const float* dis_table = dis_tables + i * ksub_total; /* Compute distances and keep smallest values */ long * __restrict heap_ids = res->ids + i * k; float * __restrict heap_dis = res->val + i * k; if (init_finalize_heap) { heap_heapify<CMin<float,long>> (k, heap_dis, heap_ids); } float dis0=0; lookup_and_update_heap(ncodes, 0, codes, dis_table,k, heap_dis, heap_ids,dis0,0 , NULL, 1); if (init_finalize_heap) { heap_reorder<CMin<float,long>> (k, heap_dis, heap_ids); } } } /// Symmetric Distance Table std::vector<float> sdc_table; int cmp(group a, size_t offset_a, group b, size_t offset_b) const { for(size_t m=0;m<M;m++){ unsigned ca = get_code_component(a, offset_a, m); unsigned cb = get_code_component(b, offset_b, m); if( ca < cb){ return -1; }else if(ca > cb){ return 1; } } return 0; } // intitialize the SDC table from the centroids void compute_sdc_table () { sdc_table.resize (M * ksub_sq_total); for (int m = 0; m < M; m++) { const float *cents = centroids.data() + centroids_offset[m]; float * dis_tab = sdc_table.data() + ksub_sq_offset[m]; // TODO optimize with BLAS for (int i = 0; i < ksub[m]; i++) { const float *centi = cents + i*dsub[m]; for (int j = 0; j < ksub[m]; j++) { float accu = 0; const float *centj = cents + j*dsub[m]; for (int k = 0; k < dsub[m]; k++) accu += sqr 
(centi[k] - centj[k]); dis_tab [i + ksub[m]*j] = accu; } } } } void search_sdc (const group * qcodes, size_t nq, const group * bcodes, const size_t nb, float_maxheap_array_t * res, bool init_finalize_heap = true) const { FAISS_THROW_IF_NOT (sdc_table.size() == M * ksub_sq_total); size_t k = res->k; #pragma omp parallel for for (size_t i = 0; i < nq; i++) { /* Compute distances and keep smallest values */ long * heap_ids = res->ids + i * k; float * heap_dis = res->val + i * k; if (init_finalize_heap) maxheap_heapify (k, heap_dis, heap_ids); for (size_t j = 0; j < nb; j++) { float dis = 0; for (int m = 0; m < M; m++) { const float * tab = sdc_table.data() + ksub_sq_offset[m]; int cq = get_code_component(qcodes, i, m); int cb = get_code_component(bcodes, j, m); dis += tab[cb + cq * ksub[m]]; } if (dis < heap_dis[0]) { maxheap_pop (k, heap_dis, heap_ids); maxheap_push (k, heap_dis, heap_ids, dis, j); } } if (init_finalize_heap) maxheap_reorder (k, heap_dis, heap_ids); } } inline void extract_val128(int32_t cmp, const __m128i& candidates, size_t j, bool store_pairs, long key, const long * list_ids, float dis0, int k, float* __restrict__ heap_dis, long * __restrict__ heap_ids, const VPQQuant* qmax, __m128i& bh_bound_sse) const { const std::uint64_t masktable[] = { 0x0, 0x0, 0x1, 0x100, 0x2, 0x200, 0x201, 0x20100, 0x3, 0x300, 0x301, 0x30100, 0x302, 0x30200, 0x30201, 0x3020100, 0x4, 0x400, 0x401, 0x40100, 0x402, 0x40200, 0x40201, 0x4020100, 0x403, 0x40300, 0x40301, 0x4030100, 0x40302, 0x4030200, 0x4030201, 0x403020100, 0x5, 0x500, 0x501, 0x50100, 0x502, 0x50200, 0x50201, 0x5020100, 0x503, 0x50300, 0x50301, 0x5030100, 0x50302, 0x5030200, 0x5030201, 0x503020100, 0x504, 0x50400, 0x50401, 0x5040100, 0x50402, 0x5040200, 0x5040201, 0x504020100, 0x50403, 0x5040300, 0x5040301, 0x504030100, 0x5040302, 0x504030200, 0x504030201, 0x50403020100, 0x6, 0x600, 0x601, 0x60100, 0x602, 0x60200, 0x60201, 0x6020100, 0x603, 0x60300, 0x60301, 0x6030100, 0x60302, 0x6030200, 0x6030201, 0x603020100, 0x604, 0x60400, 0x60401, 0x6040100, 0x60402, 0x6040200, 0x6040201, 0x604020100, 0x60403, 0x6040300, 0x6040301, 0x604030100, 0x6040302, 0x604030200, 0x604030201, 0x60403020100, 0x605, 0x60500, 0x60501, 0x6050100, 0x60502, 0x6050200, 0x6050201, 0x605020100, 0x60503, 0x6050300, 0x6050301, 0x605030100, 0x6050302, 0x605030200, 0x605030201, 0x60503020100, 0x60504, 0x6050400, 0x6050401, 0x605040100, 0x6050402, 0x605040200, 0x605040201, 0x60504020100, 0x6050403, 0x605040300, 0x605040301, 0x60504030100, 0x605040302, 0x60504030200, 0x60504030201, 0x6050403020100, 0x7, 0x700, 0x701, 0x70100, 0x702, 0x70200, 0x70201, 0x7020100, 0x703, 0x70300, 0x70301, 0x7030100, 0x70302, 0x7030200, 0x7030201, 0x703020100, 0x704, 0x70400, 0x70401, 0x7040100, 0x70402, 0x7040200, 0x7040201, 0x704020100, 0x70403, 0x7040300, 0x7040301, 0x704030100, 0x7040302, 0x704030200, 0x704030201, 0x70403020100, 0x705, 0x70500, 0x70501, 0x7050100, 0x70502, 0x7050200, 0x7050201, 0x705020100, 0x70503, 0x7050300, 0x7050301, 0x705030100, 0x7050302, 0x705030200, 0x705030201, 0x70503020100, 0x70504, 0x7050400, 0x7050401, 0x705040100, 0x7050402, 0x705040200, 0x705040201, 0x70504020100, 0x7050403, 0x705040300, 0x705040301, 0x70504030100, 0x705040302, 0x70504030200, 0x70504030201, 0x7050403020100, 0x706, 0x70600, 0x70601, 0x7060100, 0x70602, 0x7060200, 0x7060201, 0x706020100, 0x70603, 0x7060300, 0x7060301, 0x706030100, 0x7060302, 0x706030200, 0x706030201, 0x70603020100, 0x70604, 0x7060400, 0x7060401, 0x706040100, 0x7060402, 0x706040200, 0x706040201, 0x70604020100, 0x7060403, 
0x706040300, 0x706040301, 0x70604030100, 0x706040302, 0x70604030200, 0x70604030201, 0x7060403020100, 0x70605, 0x7060500, 0x7060501, 0x706050100, 0x7060502, 0x706050200, 0x706050201, 0x70605020100, 0x7060503, 0x706050300, 0x706050301, 0x70605030100, 0x706050302, 0x70605030200, 0x70605030201, 0x7060503020100, 0x7060504, 0x706050400, 0x706050401, 0x70605040100, 0x706050402, 0x70605040200, 0x70605040201, 0x7060504020100, 0x706050403, 0x70605040300, 0x70605040301, 0x7060504030100, 0x70605040302, 0x7060504030200, 0x7060504030201, 0x706050403020100 }; if(cmp) { T_TSC bh_bound_quant; T_TSC candidates_mem[codes_per_group]; const unsigned first_code_i = 0 + j*codes_per_group; _mm_storeu_si128(reinterpret_cast<__m128i*>(candidates_mem), candidates); //_mm_prefetch(&list_ids[first_code_i],_MM_HINT_T0); // Check low quadword const std::uint8_t cmp_low = cmp & 0xff; if (cmp_low) { const int match_count = _popcnt32(cmp_low); std::uint64_t match_pos = masktable[cmp_low]; for (int i = 0; i < match_count; ++i) { const std::uint8_t pos = match_pos & 0xff; match_pos >>= 8; long id; if(store_pairs){ id = (key << 32 | (first_code_i+pos)); }else{ id = list_ids[first_code_i+pos]; } heap_pushpop<CMax<float, long>>(k, heap_dis, heap_ids, qmax->unquantize_sum(candidates_mem[pos])+dis0, id); // bh_push(bh, labels, first_code_i + pos, max_scan, // candidates_mem[pos]); } } // Check high quadword const std::uint8_t cmp_high = (cmp >> 8); if (cmp_high) { const int match_count = _popcnt32(cmp_high); std::uint64_t match_pos = masktable[cmp_high] + 0x0808080808080808; for (int i = 0; i < match_count; ++i) { const std::uint8_t pos = match_pos & 0xff; match_pos >>= 8; long id; if(store_pairs){ id = (key << 32 | (first_code_i+pos)); }else{ id = list_ids[first_code_i+pos]; } heap_pushpop<CMax<float, long>>(k, heap_dis, heap_ids, qmax->unquantize_sum(candidates_mem[pos])+dis0, id); } } qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); bh_bound_sse = _mm_set1_epi8(bh_bound_quant); } } inline void extract_val(__mmask32 cmp, const __m512i& candidates, size_t j, bool store_pairs, long key, const long * list_ids, float dis0, int k, float* __restrict__ heap_dis, long * __restrict__ heap_ids, const VPQQuant* qmax, __m512i& bh_bound_av512) const { const std::uint64_t masktable[] = { 0x0, 0x0, 0x1, 0x100, 0x2, 0x200, 0x201, 0x20100, 0x3, 0x300, 0x301, 0x30100, 0x302, 0x30200, 0x30201, 0x3020100, 0x4, 0x400, 0x401, 0x40100, 0x402, 0x40200, 0x40201, 0x4020100, 0x403, 0x40300, 0x40301, 0x4030100, 0x40302, 0x4030200, 0x4030201, 0x403020100, 0x5, 0x500, 0x501, 0x50100, 0x502, 0x50200, 0x50201, 0x5020100, 0x503, 0x50300, 0x50301, 0x5030100, 0x50302, 0x5030200, 0x5030201, 0x503020100, 0x504, 0x50400, 0x50401, 0x5040100, 0x50402, 0x5040200, 0x5040201, 0x504020100, 0x50403, 0x5040300, 0x5040301, 0x504030100, 0x5040302, 0x504030200, 0x504030201, 0x50403020100, 0x6, 0x600, 0x601, 0x60100, 0x602, 0x60200, 0x60201, 0x6020100, 0x603, 0x60300, 0x60301, 0x6030100, 0x60302, 0x6030200, 0x6030201, 0x603020100, 0x604, 0x60400, 0x60401, 0x6040100, 0x60402, 0x6040200, 0x6040201, 0x604020100, 0x60403, 0x6040300, 0x6040301, 0x604030100, 0x6040302, 0x604030200, 0x604030201, 0x60403020100, 0x605, 0x60500, 0x60501, 0x6050100, 0x60502, 0x6050200, 0x6050201, 0x605020100, 0x60503, 0x6050300, 0x6050301, 0x605030100, 0x6050302, 0x605030200, 0x605030201, 0x60503020100, 0x60504, 0x6050400, 0x6050401, 0x605040100, 0x6050402, 0x605040200, 0x605040201, 0x60504020100, 0x6050403, 0x605040300, 0x605040301, 0x60504030100, 0x605040302, 0x60504030200, 0x60504030201, 
0x6050403020100, 0x7, 0x700, 0x701, 0x70100, 0x702, 0x70200, 0x70201, 0x7020100, 0x703, 0x70300, 0x70301, 0x7030100, 0x70302, 0x7030200, 0x7030201, 0x703020100, 0x704, 0x70400, 0x70401, 0x7040100, 0x70402, 0x7040200, 0x7040201, 0x704020100, 0x70403, 0x7040300, 0x7040301, 0x704030100, 0x7040302, 0x704030200, 0x704030201, 0x70403020100, 0x705, 0x70500, 0x70501, 0x7050100, 0x70502, 0x7050200, 0x7050201, 0x705020100, 0x70503, 0x7050300, 0x7050301, 0x705030100, 0x7050302, 0x705030200, 0x705030201, 0x70503020100, 0x70504, 0x7050400, 0x7050401, 0x705040100, 0x7050402, 0x705040200, 0x705040201, 0x70504020100, 0x7050403, 0x705040300, 0x705040301, 0x70504030100, 0x705040302, 0x70504030200, 0x70504030201, 0x7050403020100, 0x706, 0x70600, 0x70601, 0x7060100, 0x70602, 0x7060200, 0x7060201, 0x706020100, 0x70603, 0x7060300, 0x7060301, 0x706030100, 0x7060302, 0x706030200, 0x706030201, 0x70603020100, 0x70604, 0x7060400, 0x7060401, 0x706040100, 0x7060402, 0x706040200, 0x706040201, 0x70604020100, 0x7060403, 0x706040300, 0x706040301, 0x70604030100, 0x706040302, 0x70604030200, 0x70604030201, 0x7060403020100, 0x70605, 0x7060500, 0x7060501, 0x706050100, 0x7060502, 0x706050200, 0x706050201, 0x70605020100, 0x7060503, 0x706050300, 0x706050301, 0x70605030100, 0x706050302, 0x70605030200, 0x70605030201, 0x7060503020100, 0x7060504, 0x706050400, 0x706050401, 0x70605040100, 0x706050402, 0x70605040200, 0x70605040201, 0x7060504020100, 0x706050403, 0x70605040300, 0x70605040301, 0x7060504030100, 0x70605040302, 0x7060504030200, 0x7060504030201, 0x706050403020100 }; if (cmp) { T_TSC bh_bound_quant; T_TSC candidates_mem[codes_per_group]; const unsigned first_code_i = 0 + j * codes_per_group; //_mm_prefetch(&list_ids[first_code_i],_MM_HINT_T0); _mm512_storeu_si512(reinterpret_cast<__m512i*>(candidates_mem), candidates); // Check low quadword const std::uint8_t cmp_low = cmp & 0xff; if (cmp_low) { const int match_count = _popcnt32(cmp_low); std::uint64_t match_pos = masktable[cmp_low]; for (int i = 0;i < match_count;++i) { const std::uint8_t pos = match_pos & 0xff; match_pos >>= 8; long id; if (store_pairs) { id = (key << 32 | (first_code_i + pos)); } else { id = list_ids[first_code_i + pos]; } heap_pushpop<CMax<float,long > >(k, heap_dis, heap_ids, qmax->unquantize_sum(candidates_mem[pos]) + dis0, id); // bh_push(bh, labels, first_code_i + pos, max_scan, // candidates_mem[pos]); } } // Check high quadword const std::uint8_t cmp_high = (cmp >> 8) & 0xff; if (cmp_high) { const int match_count = _popcnt32(cmp_high); std::uint64_t match_pos = masktable[cmp_high] + 0x0808080808080808; for (int i = 0;i < match_count;++i) { const std::uint8_t pos = match_pos & 0xff; match_pos >>= 8; long id; if (store_pairs) { id = (key << 32 | (first_code_i + pos)); } else { id = list_ids[first_code_i + pos]; } heap_pushpop<CMax<float,long > >(k, heap_dis, heap_ids, qmax->unquantize_sum(candidates_mem[pos]) + dis0, id); } } // Check high quadword const std::uint8_t cmp_highb = (cmp >> 16) & 0xff; if (cmp_highb) { const int match_count = _popcnt32(cmp_highb); std::uint64_t match_pos = masktable[cmp_highb] + 0x1010101010101010; for (int i = 0;i < match_count;++i) { const std::uint8_t pos = match_pos & 0xff; match_pos >>= 8; long id; if (store_pairs) { id = (key << 32 | (first_code_i + pos)); } else { id = list_ids[first_code_i + pos]; } heap_pushpop<CMax<float,long > >(k, heap_dis, heap_ids, qmax->unquantize_sum(candidates_mem[pos]) + dis0, id); } } // Check high quadword const std::uint8_t cmp_highc = (cmp >> 24) & 0xff; if (cmp_highc) { const int 
                match_count = _popcnt32(cmp_highc);
                std::uint64_t match_pos = masktable[cmp_highc] + 0x1818181818181818;
                for (int i = 0; i < match_count; ++i) {
                    const std::uint8_t pos = match_pos & 0xff;
                    match_pos >>= 8;
                    long id;
                    if (store_pairs) {
                        id = (key << 32 | (first_code_i + pos));
                    } else {
                        id = list_ids[first_code_i + pos];
                    }
                    heap_pushpop<CMax<float,long > >(k, heap_dis, heap_ids,
                            qmax->unquantize_sum(candidates_mem[pos]) + dis0, id);
                }
            }
            qmax->quantize_sum(heap_dis[0] - dis0, &bh_bound_quant);
            bh_bound_av512 = _mm512_set1_epi16(bh_bound_quant);
        }
    }

/* Control structure for iterating over the set bits of a mask */
#define FOREACH_IN_MASK32(i,m,mn) for(mn=m,i=__builtin_ctz(mn); mn != 0; mn &= ~(1lu << i),i=__builtin_ctz(mn))
#define FOREACH_IN_MASK64(i,m,mn) for(mn=m,i=__builtin_ctzl(mn); mn != 0; mn &= ~(1llu << i),i=__builtin_ctzl(mn))
/* Example usage:
   int pos; int tmp_mask;
   FOREACH_IN_MASK32(pos, cmp, tmp_mask) {
       // body runs once per set bit of cmp, with the bit index in pos
   }
*/

    inline void extract_val_loop(__mmask32 cmp, __m512i& candidates, size_t j,
            bool store_pairs, long key, const long * list_ids, float dis0, int& k,
            float* __restrict__ heap_dis, long * __restrict__ heap_ids,
            const VPQQuant* qmax, __m512i& bh_bound_av512) const {
        if (cmp) {
            T_TSC bh_bound_quant;
            T_TSC candidates_mem[codes_per_group];
            const unsigned first_code_i = 0 + j * codes_per_group;
            _mm512_storeu_si512(reinterpret_cast<__m512i*>(candidates_mem), candidates);
            //_mm_prefetch(&list_ids[first_code_i],_MM_HINT_T0);
            int pos;
            int tmp_mask;
            FOREACH_IN_MASK32(pos, cmp, tmp_mask) {
                long id;
                if (store_pairs) {
                    id = (key << 32 | (first_code_i + pos));
                } else {
                    id = list_ids[first_code_i + pos];
                }
                heap_pushpop<CMax<float,long > >(k, heap_dis, heap_ids,
                        qmax->unquantize_sum(candidates_mem[pos]) + dis0, id);
            }
            qmax->quantize_sum(heap_dis[0] - dis0, &bh_bound_quant);
            bh_bound_av512 = _mm512_set1_epi16(bh_bound_quant);
        }
    }

    inline void extract_val_loop128(unsigned cmp, const __m128i& candidates, size_t j,
            bool store_pairs, long key, const long * list_ids, float dis0, int& k,
            float* __restrict__ heap_dis, long * __restrict__ heap_ids,
            const VPQQuant* qmax, __m128i& bh_bound_sse) const {
        if (cmp) {
            T_TSC bh_bound_quant;
            T_TSC candidates_mem[codes_per_group];
            const unsigned first_code_i = 0 + j * codes_per_group;
            _mm_storeu_si128(reinterpret_cast<__m128i*>(candidates_mem), candidates);
            //_mm_prefetch(&list_ids[first_code_i],_MM_HINT_T0);
            int pos;
            int tmp_mask;
            FOREACH_IN_MASK32(pos, cmp, tmp_mask) {
                long id;
                if (store_pairs) {
                    id = (key << 32 | (first_code_i + pos));
                } else {
                    id = list_ids[first_code_i + pos];
                }
                heap_pushpop<CMax<float,long > >(k, heap_dis, heap_ids,
                        qmax->unquantize_sum(candidates_mem[pos]) + dis0, id);
            }
            qmax->quantize_sum(heap_dis[0] - dis0, &bh_bound_quant);
            bh_bound_sse = _mm_set1_epi8(bh_bound_quant);
        }
    }
};

template<int TT_M, int TT_P, int TT_PG_0, int TT_PG_1, int TT_PG_2, int TT_PG_3,
         class TT_TSC, class TT_TSCMM, class TT_TSCMMXL>
struct VecProductQuantizer_NoVecTable :
        public AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL> {

    typedef typename AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>::group group;
    typedef typename AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>::VPQQuant VPQQuant;

    VecProductQuantizer_NoVecTable(size_t d) : /* dimensionality of the input vectors */
            AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>(d) {}

    VecProductQuantizer_NoVecTable() : /* dimensionality of the input vectors */
            AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>() {}

    inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * codes,
            const float * dis_table, TT_TSCMM * mm_dis_tables, VPQQuant * qmax, int k,
            float * __restrict heap_dis, long* __restrict heap_ids, float dis0,
            long key, const long* list_ids, bool store_pairs) const override {
        FAISS_THROW_IF_NOT_MSG(this->cross_lane == 1,
                "This requires quantize tables not laid out for cross-lanes operations");
        for (size_t j = 0; j < ncodes; j++) {
            int32_t dis = 1;
            for (size_t m = 0; m < TT_M; m += 1) {
                TT_TSC* quant_dis_table = (TT_TSC*)&mm_dis_tables[this->dt_lanes_offset[m]];
                int c = this->get_code_component(codes, offset+j, m);
                //float disf = dis_table[this->ksub_offset[m] + c];
                TT_TSC dis8 = quant_dis_table[c];
                //qmax.quantize_val(disf, &dis8);
                dis += static_cast<int32_t>(dis8);
            }
            // Apply saturation
            dis = std::min((int32_t)std::numeric_limits<TT_TSC>::max(), dis);
            long id;
            if (store_pairs) {
                id = (key << 32 | (j+offset));
            } else {
                id = list_ids[j+offset];
            }
            heap_pushpop<typename AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>::CHeap>(
                    k, heap_dis, heap_ids, qmax->unquantize_sum(dis)+dis0, id);
        }
    }
};

template<int TT_M, class TT_TSCMM, class TT_TSCMMXL>
struct VecProductQuantizer_BoltNoVec :
        virtual public AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,TT_TSCMM,TT_TSCMMXL> {

    typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,TT_TSCMM,TT_TSCMMXL>::group group;
    typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,TT_TSCMM,TT_TSCMMXL>::VPQQuant VPQQuant;

    float best_mindist;
    float best_maxdist;

    VecProductQuantizer_BoltNoVec(size_t d) : /* dimensionality of the input vectors */
            AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,TT_TSCMM,TT_TSCMMXL>(d) {
        this->initial_scan_estim_parameter = 0;
    }

    VecProductQuantizer_BoltNoVec() : /* dimensionality of the input vectors */
            AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,TT_TSCMM,TT_TSCMMXL>() {
        this->initial_scan_estim_parameter = 0;
    }

    void train (int n, const float *x) {
        float best_loss = std::numeric_limits<float>::infinity();

        // Train the PQ code
        AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,TT_TSCMM,TT_TSCMMXL>::train(n,x);

        // Train the distance quantizer
        // Pick up to 10000 vectors as queries
        std::vector<int> perm (n);
        int seed = 1234;
        rand_perm (perm.data (), n, seed);
        int nx = std::min(10000, n);
        std::vector<float> x_queries(nx * this->d);
        std::vector<float> true_dists(nx * this->ksub_total);
        for (int i = 0; i < nx; i++)
            memcpy (x_queries.data() + i * this->d, x + perm[i] * this->d,
                    sizeof(float) * this->d);
        this->compute_distance_tables(nx, x_queries.data(), true_dists.data()); //x = nx*d, nx*M*ksub
        std::sort(true_dists.begin(), true_dists.end());

        const std::vector<float> alphas ({.001, .002, .005, .01, .02, .05, .1});
        for (float a : alphas) {
            float lower_quantile = true_dists[static_cast<int>(std::floor(a*nx*this->ksub_total))];
            float upper_quantile = true_dists[static_cast<int>(std::ceil((1.0-a)*nx*this->ksub_total))];
            float scale = 255.0/(upper_quantile-lower_quantile);
            float loss = 0.0;
            for (int i = 0; i < nx*this->ksub_total; i++) {
                float quant_dist = std::min(255.0f, scale*std::max(0.0f, true_dists[i] - lower_quantile));
                loss += (true_dists[i]-quant_dist) * (true_dists[i]-quant_dist);
            }
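            /* Explanatory note (added): this calibration clamps per-component
             * distances below the a-quantile to 0, saturates those above the
             * (1-a)-quantile at 255, and scales the rest affinely by
             * 255/(upper-lower). For example, with lower=2.0 and upper=10.0
             * the scale is 255/8, so a raw table entry of 6.0 maps to
             * (6.0-2.0)*255/8 = 127.5 before integer rounding. The (lower,
             * upper) pair minimizing the summed squared error over the sampled
             * tables is kept below in best_mindist/best_maxdist and reused by
             * quantize_tables() at search time. Note that, as written, the
             * loss compares the raw distance to its 0..255 code rather than to
             * a de-quantized reconstruction. */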
if(loss < best_loss){ best_mindist=lower_quantile; best_maxdist=upper_quantile; best_loss=loss; } } } virtual inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * codes, const float * dis_table, TT_TSCMM * mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { for (size_t j = 0; j < ncodes; j++) { int32_t dis = 0; for (size_t m = 0; m < TT_M; m+=1) { int c = this->get_code_component(codes, offset+j, m); float disf = dis_table[this->ksub_offset[m] + c]; uint8_t dis8; // Bolt is defined for 8-bit values qmax->quantize_val(disf, &dis8,m); if(dis8 < 0) { printf("Warning, dis8 is < 0 -- %d %d %f [%f %f]\n",std::numeric_limits<uint8_t>::max(),(int)dis8,disf,qmax->min[m],qmax->max); } dis += static_cast<int32_t>(dis8); } // Apply saturation (8 or 16 bit, the paper use the 16 bit variant / the repo contain an 8 bit variant) dis = std::min((int32_t)std::numeric_limits<int16_t>::max(), dis); long id; if(store_pairs){ id = (key << 32 | (j+offset)); }else{ id = list_ids[j+offset]; } heap_pushpop<typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,TT_TSCMM,TT_TSCMMXL>::CHeap>(k, heap_dis, heap_ids, (reinterpret_cast<QuantizerMAX_bolt*>(qmax))->unquantize_sum16(dis)+dis0, id); } } VPQQuant * quantize_tables(const float* dis_table, TT_TSCMM* mm_dis_tables, float max) const override{ VPQQuant* quant = new QuantizerMAX_bolt(best_mindist,best_maxdist,TT_M); this->build_mm_tables(dis_table,mm_dis_tables,quant); return quant; } }; template<int TT_M, int TT_P, int TT_PG_0, int TT_PG_1, int TT_PG_2, int TT_PG_3, class TT_TSC, class TT_TSCMM, class TT_TSCMMXL> struct VecProductQuantizer_NoVec : public AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL> { typedef typename AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>::group group; typedef typename AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>::VPQQuant VPQQuant; VecProductQuantizer_NoVec(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>(d) {} VecProductQuantizer_NoVec() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>() {} inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * codes, const float * dis_table, TT_TSCMM * mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { for (size_t j = 0; j < ncodes; j++) { int32_t dis = 0; for (size_t m = 0; m < TT_M; m+=1) { int c = this->get_code_component(codes, offset+j, m); float disf = dis_table[this->ksub_offset[m] + c]; TT_TSC dis8; qmax->quantize_val(disf, &dis8,m); if(dis8 < 0) { printf("Warning, dis8 is < 0 -- %d %d %f [%f %f]\n",std::numeric_limits<TT_TSC>::max(),(int)dis8,disf,qmax->min[m],qmax->max); } dis += static_cast<int32_t>(dis8); } // Apply saturation dis = std::min((int32_t)std::numeric_limits<TT_TSC>::max(), dis); long id; if(store_pairs){ id = (key << 32 | (j+offset)); }else{ id = list_ids[j+offset]; } heap_pushpop<typename AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>::CHeap>(k, heap_dis, heap_ids, 
qmax->unquantize_sum(dis)+dis0, id); } } /* We do not need (and must not since memory is not necessarily of the right size) build quantized lookup tables */ void build_mm_tables(const float* dis_table, TT_TSCMM* mm_dis_tables, QuantizerMAX<TT_TSC> * quant) const override {} }; template<int TT_M, int TT_P, int TT_PG_0, int TT_PG_1, int TT_PG_2, int TT_PG_3, class TT_TSC, class TT_TSCMM,class TT_TSCMMXL> struct VecProductQuantizer_NoVecNoQuant : public AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL> { typedef typename AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>::group group; typedef typename AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>::VPQQuant VPQQuant; VecProductQuantizer_NoVecNoQuant(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>(d) {} VecProductQuantizer_NoVecNoQuant() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,TT_P,TT_PG_0,TT_PG_1,TT_PG_2,TT_PG_3,TT_TSC,TT_TSCMM,TT_TSCMMXL>() {} inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * codes, const float * dis_table, TT_TSCMM * mm_dis_tables, VPQQuant* qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { this->lookup_and_update_heap(ncodes, offset, codes, dis_table, k, heap_dis, heap_ids, dis0,key,list_ids,store_pairs); } /* We do not need (and must not since memory is not necessarily of the right size) build quantized lookup tables */ void build_mm_tables(const float* dis_table, TT_TSCMM* mm_dis_tables, QuantizerMAX<TT_TSC> * quant) const override {} }; template<int TT_M> struct VecProductQuantizer_4_AVX256 : public AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i> { typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>::VPQQuant VPQQuant; VecProductQuantizer_4_AVX256(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>(d) { } VecProductQuantizer_4_AVX256() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m128i *__restrict mm_dis_tables1, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { const __m256i low_mask_avx = _mm256_set1_epi8(0x0f); // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>::lanes_per_code; __m256i * __restrict mm_dis_tables = (__m256i*)mm_dis_tables1; // Binheap extraction int8_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m128i bh_bound_sse = _mm_set1_epi8(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = 
(offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m256i twolane_sum = _mm256_setzero_si256(); // Rows 1..ROW_COUNT for(int row_i = 0; row_i < lanes_per_code/2; ++row_i) { // Lookup add (low) const __m256i comps = _mm256_load_si256(&codes[j].mmxl[row_i]); const __m256i masked = _mm256_and_si256(comps, low_mask_avx); const __m256i partiala = _mm256_shuffle_epi8(mm_dis_tables[2*row_i], masked); // Lookup add (high) const __m256i compsb = _mm256_srli_epi64(comps, 4); const __m256i maskedb = _mm256_and_si256(compsb, low_mask_avx); const __m256i partialb = _mm256_shuffle_epi8(mm_dis_tables[2*row_i+1], maskedb); const __m256i partial_sum = _mm256_adds_epi8(partiala, partialb); twolane_sum = _mm256_adds_epi8(twolane_sum,partial_sum); } // Reduce const __m128i sum_a = _mm256_extracti128_si256(twolane_sum,0); const __m128i sum_b = _mm256_extracti128_si256(twolane_sum,1); const __m128i candidates = _mm_adds_epi8(sum_a, sum_b); // Compare const __m128i compare = _mm_cmplt_epi8(candidates, bh_bound_sse); int cmp = _mm_movemask_epi8(compare); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val128(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_sse); } //compare_extract_matches_sse(candidates, bh_bound_sse, scanned, max_scan, // candidates_mem, bh, labels, labels_offset); } } }; template<int TT_M> struct VecProductQuantizer_4_AVX256_unsigned : virtual public AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i> { typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>::VPQQuant VPQQuant; VecProductQuantizer_4_AVX256_unsigned(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>(d) { } VecProductQuantizer_4_AVX256_unsigned() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m128i *__restrict mm_dis_tables1, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { const __m256i low_mask_avx = _mm256_set1_epi8(0x0f); // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>::lanes_per_code; __m256i * __restrict mm_dis_tables = (__m256i*)mm_dis_tables1; // Binheap extraction uint8_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m128i bh_bound_sse = _mm_set1_epi8(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = 
(offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m256i twolane_sum = _mm256_setzero_si256(); // Rows 1..ROW_COUNT for(int row_i = 0; row_i < lanes_per_code/2; ++row_i) { // Lookup add (low) const __m256i comps = _mm256_load_si256(&codes[j].mmxl[row_i]); const __m256i masked = _mm256_and_si256(comps, low_mask_avx); const __m256i partiala = _mm256_shuffle_epi8(mm_dis_tables[2*row_i], masked); //twolane_sum = _mm256_adds_epu8(twolane_sum,partiala); // Lookup add (high) const __m256i compsb = _mm256_srli_epi64(comps, 4); const __m256i maskedb = _mm256_and_si256(compsb, low_mask_avx); const __m256i partialb = _mm256_shuffle_epi8(mm_dis_tables[2*row_i+1], maskedb); const __m256i partial_sum = _mm256_adds_epu8(partiala,partialb); twolane_sum = _mm256_adds_epu8(twolane_sum,partial_sum); } // Reduce const __m128i sum_a = _mm256_extracti128_si256(twolane_sum,0); const __m128i sum_b = _mm256_extracti128_si256(twolane_sum,1); const __m128i candidates = _mm_adds_epu8(sum_a, sum_b); // Compare const __m128i compare = _mm_cmplt_epu8(candidates, bh_bound_sse); int cmp = _mm_movemask_epi8(compare); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val128(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_sse); } //compare_extract_matches_sse(candidates, bh_bound_sse, scanned, max_scan, // candidates_mem, bh, labels, labels_offset); } } }; template<int TT_M> struct VecProductQuantizer_4_AVX256_Bolt16 : public VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i> { typedef typename VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>::group group; typedef typename VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>::VPQQuant VPQQuant; VecProductQuantizer_4_AVX256_Bolt16(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>(d), VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>(d) { this->initial_scan_estim_parameter=0; } VecProductQuantizer_4_AVX256_Bolt16() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>(), VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>() { this->initial_scan_estim_parameter=0; } // A specific method is required for bolt as due to upcasting, the result vector is equal to twice the T_TSCMM // Also note that it does not call unquantize_sum (which takes an uint8_t but unquantize_sum16). 
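    /* Added sketch (not called by the scanner): a scalar model of the 16-bit
     * Bolt accumulation implemented below. `lut_entries` is a hypothetical
     * array holding the M quantized 8-bit table entries selected by one code.
     * Widening to 16/32 bits lets every component use the full 0..255 range;
     * only the final sum saturates, unlike the 8-bit kernels where each add
     * saturates. */
    static inline int32_t bolt16_scalar_model(const uint8_t * lut_entries, int m_count) {
        int32_t sum = 0;                      // 16-bit lanes in the SIMD kernel
        for (int m = 0; m < m_count; m++) {
            sum += lut_entries[m];            // no per-add saturation needed
        }
        return std::min(sum, (int32_t)std::numeric_limits<int16_t>::max());
    }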
inline void extract_val_loop_bolt(unsigned cmp, const __m256i& candidates, size_t j, bool store_pairs, long key, const long * list_ids, float dis0, int& k, float* __restrict__ heap_dis, long * __restrict__ heap_ids, const QuantizerMAX_bolt* qmax, __m256i& bh_bound_avx) const { if (cmp) { int16_t bh_bound_quant; int16_t candidates_mem[this->codes_per_group]; const unsigned first_code_i = 0 + j * this->codes_per_group; _mm256_storeu_si256(reinterpret_cast<__m256i*>(candidates_mem), candidates); //_mm_prefetch(&list_ids[first_code_i],_MM_HINT_T0); int pos; int tmp_mask; FOREACH_IN_MASK32(pos, cmp, tmp_mask ){ long id; if (store_pairs) { id = (key << 32 | (first_code_i + pos)); } else { id = list_ids[first_code_i + pos]; } heap_pushpop<CMax<float,long > >(k, heap_dis, heap_ids, qmax->unquantize_sum16(candidates_mem[pos]) + dis0, id); } qmax->quantize_sum16(heap_dis[0] - dis0, &bh_bound_quant); bh_bound_avx = _mm256_set1_epi16(bh_bound_quant); } } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m128i *__restrict mm_dis_tables1, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { const __m256i low_mask_avx = _mm256_set1_epi8(0x0f); // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>::lanes_per_code; __m256i * __restrict mm_dis_tables = (__m256i*)mm_dis_tables1; // Binheap extraction int16_t bh_bound_quant; (reinterpret_cast<QuantizerMAX_bolt*>(qmax))->quantize_sum16(heap_dis[0]-dis0, &bh_bound_quant); __m256i bh_bound_avx = _mm256_set1_epi16(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m256i twolane_sum_lo = _mm256_setzero_si256(); __m256i twolane_sum_hi = _mm256_setzero_si256(); // Rows 1..ROW_COUNT for(int row_i = 0; row_i < lanes_per_code/2; ++row_i) { // Lookup add (low) const __m256i comps = _mm256_load_si256(&codes[j].mmxl[row_i]); const __m256i masked = _mm256_and_si256(comps, low_mask_avx); const __m256i partiala = _mm256_shuffle_epi8(mm_dis_tables[2*row_i], masked); const __m256i partiala_lo = _mm256_unpacklo_epi8(partiala, _mm256_setzero_si256()); const __m256i partiala_hi = _mm256_unpackhi_epi8(partiala, _mm256_setzero_si256()); // Lookup add (high) const __m256i compsb = _mm256_srli_epi64(comps, 4); const __m256i maskedb = _mm256_and_si256(compsb, low_mask_avx); const __m256i partialb = _mm256_shuffle_epi8(mm_dis_tables[2*row_i+1], maskedb); const __m256i partialb_lo = _mm256_unpacklo_epi8(partialb, _mm256_setzero_si256()); const __m256i partialb_hi = _mm256_unpackhi_epi8(partialb, _mm256_setzero_si256()); // Unpack const __m256i partial_sum_lo = _mm256_add_epi16(partiala_lo, partialb_lo); const __m256i partial_sum_hi = _mm256_add_epi16(partiala_hi, partialb_hi); twolane_sum_lo = _mm256_add_epi16(twolane_sum_lo,partial_sum_lo); 
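                // Note (added): partiala/partialb hold 32 unsigned bytes each;
                // unpacking against zero widens them to 16-bit lanes, so these
                // plain (non-saturating) epi16 adds stay exact while the running
                // sum is below 2^15 (at most 255 per component over M adds).
                // The high half is accumulated symmetrically just below.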
twolane_sum_hi = _mm256_add_epi16(twolane_sum_hi,partial_sum_hi); } // Reduce // [LO_HI LO_LO] [HI_HI HI_LO] // [HI_HI LO_LO] [HI_LO LO_HI] => [HI_HI+HI_LO LO_HI+LO_LO] const __m256i sum_a = _mm256_blend_epi32(twolane_sum_lo,twolane_sum_hi,0xf0); const __m256i sum_b = _mm256_permute2x128_si256(twolane_sum_lo, twolane_sum_hi, 0x21); const __m256i candidates = _mm256_add_epi16(sum_a, sum_b); // Compare const __m256i compare = _mm256_cmpgt_epi16(bh_bound_avx,candidates); const __m128i compare_lo = _mm256_extracti128_si256(compare,0); const __m128i compare_hi = _mm256_extracti128_si256(compare,1); const __m128i compare8 = _mm_packs_epi16(compare_lo,compare_hi); int cmp = _mm_movemask_epi8(compare8); // Keep only 16 low bits, rest is noise. // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val_loop_bolt(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, reinterpret_cast<QuantizerMAX_bolt*>(qmax), bh_bound_avx); } //compare_extract_matches_sse(candidates, bh_bound_sse, scanned, max_scan, // candidates_mem, bh, labels, labels_offset); } }; }; //#endif template<int TT_M> struct VecProductQuantizer_4_AVX256_qadc : public VecProductQuantizer_4_AVX256<TT_M>{ typedef typename VecProductQuantizer_4_AVX256<TT_M>::group group; typedef typename VecProductQuantizer_4_AVX256<TT_M>::VPQQuant VPQQuant; VecProductQuantizer_4_AVX256_qadc(size_t d) : /* dimensionality of the input vectors */ VecProductQuantizer_4_AVX256<TT_M>(d) { } VecProductQuantizer_4_AVX256_qadc() : /* dimensionality of the input vectors */ VecProductQuantizer_4_AVX256<TT_M>() { } //typedef QuantizerMAX<int8_t> VPQQuant; typedef __m128i QuantTableLane; virtual VPQQuant * quantize_tables(const float* dis_table, QuantTableLane* mm_dis_tables, float max) const override{ /* Find the global per-table minimum */ float gmin_global=std::numeric_limits<float>::infinity(); float gmin_sum=0.0; float gmin_current[TT_M]; for(size_t m=0;m<TT_M;m++){ for(size_t i=0;i<this->ksub[m];i++){ gmin_global=std::min(dis_table[this->ksub_offset[m]+i],gmin_global); } } for(size_t m=0;m<TT_M;m++){ gmin_current[m]=gmin_global; gmin_sum+=gmin_global; } if(gmin_sum >= max){ /* We cannot build a quantizer as all distances quantize to infinite, the whole cell/inverted list can be skipped */ return nullptr; }else{ /* We can build a quantizer, we will need to scan the inverted list/cell */ VPQQuant * qmax = new QuantizerMAX_qadc<int8_t>(gmin_current,gmin_sum,max,TT_M); this->build_mm_tables(dis_table, mm_dis_tables, qmax); return qmax; } } }; template<int TT_M> struct VecProductQuantizer_4_AVX256_Bolt8 : public VecProductQuantizer_4_AVX256_unsigned<TT_M>, public VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>{ typedef typename VecProductQuantizer_4_AVX256_unsigned<TT_M>::group group; typedef typename VecProductQuantizer_4_AVX256_unsigned<TT_M>::VPQQuant VPQQuant; VecProductQuantizer_4_AVX256_Bolt8(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>(d), VecProductQuantizer_4_AVX256_unsigned<TT_M>(d), VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>(d) { this->initial_scan_estim_parameter=0; } VecProductQuantizer_4_AVX256_Bolt8() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>(), VecProductQuantizer_4_AVX256_unsigned<TT_M>(), 
VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>() { this->initial_scan_estim_parameter=0; } using VecProductQuantizer_4_AVX256_unsigned<TT_M>::build_mm_tables; using VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>::quantize_tables; using VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>::get_initial_scan_estim_parameter; using VecProductQuantizer_BoltNoVec<TT_M,__m128i,__m256i>::train; inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m128i *__restrict mm_dis_tables1, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { VecProductQuantizer_4_AVX256_unsigned<TT_M>::lookup_and_update_heap_simd(ncodes, offset, codes, dis_table, mm_dis_tables1, qmax, k, heap_dis, heap_ids, dis0, key, list_ids, store_pairs); } }; #ifdef __AVX512F__ template<int TT_M> struct VecProductQuantizer_4_AVX512_unsigned : public AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i> { typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i>::VPQQuant VPQQuant; VecProductQuantizer_4_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i>(d) { } VecProductQuantizer_4_AVX512_unsigned() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m128i *__restrict mm_dis_tables1, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { const __m512i low_mask_avx = _mm512_set1_epi8(0x0f); // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i>::lanes_per_code; __m512i * __restrict mm_dis_tables = (__m512i*)mm_dis_tables1; // Binheap extraction uint8_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m128i bh_bound_sse = _mm_set1_epi8(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m512i fourlane_sum = _mm512_setzero_si512(); // Rows 0..ROW_COUNT for(int row_i = 0; row_i < lanes_per_code/4; ++row_i) { // Lookup add (low) const __m512i comps = _mm512_load_si512(&codes[j].mmxl[row_i]); const __m512i masked = _mm512_and_si512(comps, low_mask_avx); const __m512i partiala = _mm512_shuffle_epi8(mm_dis_tables[2*row_i], masked); //fourlane_sum = _mm512_adds_epu8(fourlane_sum,partiala); // Lookup add (high) const __m512i compsb = _mm512_srli_epi64(comps, 4); const __m512i maskedb = _mm512_and_si512(compsb, low_mask_avx); const __m512i partialb 
= _mm512_shuffle_epi8(mm_dis_tables[2*row_i+1], maskedb); const __m512i partial_sum = _mm512_adds_epu8(partiala,partialb); fourlane_sum = _mm512_adds_epu8(fourlane_sum,partial_sum); } // Reduce __m128i sum_a = _mm512_extracti32x4_epi32(fourlane_sum,0); __m128i sum_b = _mm512_extracti32x4_epi32(fourlane_sum,1); __m128i sum_c = _mm512_extracti32x4_epi32(fourlane_sum,2); __m128i sum_d = _mm512_extracti32x4_epi32(fourlane_sum,3); const __m128i candidates = _mm_adds_epu8( _mm_adds_epu8(sum_a,sum_b), _mm_adds_epu8(sum_c,sum_d)); // Compare const __m128i compare = _mm_cmplt_epu8(candidates, bh_bound_sse); int cmp = _mm_movemask_epi8(compare); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val128(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_sse); } //compare_extract_matches_sse(candidates, bh_bound_sse, scanned, max_scan, // candidates_mem, bh, labels, labels_offset); } }; }; #else template<int TT_M> struct VecProductQuantizer_4_AVX512_unsigned : public VecProductQuantizer_NoVec<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i> { VecProductQuantizer_4_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ VecProductQuantizer_NoVec<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i>(d) { } VecProductQuantizer_4_AVX512_unsigned() : /* dimensionality of the input vectors */ VecProductQuantizer_NoVec<TT_M,2,4,4,0,0,uint8_t,__m128i,__m512i>() { } }; #endif #ifdef __AVX512F__ template<int TT_M> struct VecProductQuantizer_4_AVX512 : public AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i> { typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i>::VPQQuant VPQQuant; VecProductQuantizer_4_AVX512(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i>(d) { } VecProductQuantizer_4_AVX512() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m128i *__restrict mm_dis_tables1, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { const __m512i low_mask_avx = _mm512_set1_epi8(0x0f); // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i>::lanes_per_code; __m512i * __restrict mm_dis_tables = (__m512i*)mm_dis_tables1; // Binheap extraction int8_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m128i bh_bound_sse = _mm_set1_epi8(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = 
start_group_index; j <= last_group_index; j++) { __m512i fourlane_sum = _mm512_setzero_si512(); // Rows 0..ROW_COUNT for(int row_i = 0; row_i < lanes_per_code/4; ++row_i) { // Lookup add (low) const __m512i comps = _mm512_load_si512(&codes[j].mmxl[row_i]); const __m512i masked = _mm512_and_si512(comps, low_mask_avx); const __m512i partiala = _mm512_shuffle_epi8(mm_dis_tables[2*row_i], masked); //fourlane_sum = _mm512_adds_epu8(fourlane_sum,partiala); // Lookup add (high) const __m512i compsb = _mm512_srli_epi64(comps, 4); const __m512i maskedb = _mm512_and_si512(compsb, low_mask_avx); const __m512i partialb = _mm512_shuffle_epi8(mm_dis_tables[2*row_i+1], maskedb); const __m512i partial_sum = _mm512_adds_epi8(partiala,partialb); fourlane_sum = _mm512_adds_epi8(fourlane_sum,partial_sum); } // Reduce __m128i sum_a = _mm512_extracti32x4_epi32(fourlane_sum,0); __m128i sum_b = _mm512_extracti32x4_epi32(fourlane_sum,1); __m128i sum_c = _mm512_extracti32x4_epi32(fourlane_sum,2); __m128i sum_d = _mm512_extracti32x4_epi32(fourlane_sum,3); const __m128i candidates = _mm_adds_epi8( _mm_adds_epi8(sum_a,sum_b), _mm_adds_epi8(sum_c,sum_d)); // Compare const __m128i compare = _mm_cmplt_epi8(candidates, bh_bound_sse); int cmp = _mm_movemask_epi8(compare); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val128(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_sse); } //compare_extract_matches_sse(candidates, bh_bound_sse, scanned, max_scan, // candidates_mem, bh, labels, labels_offset); } }; }; #else template<int TT_M> struct VecProductQuantizer_4_AVX512 : public VecProductQuantizer_NoVec<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i> { VecProductQuantizer_4_AVX512(size_t d) : /* dimensionality of the input vectors */ VecProductQuantizer_NoVec<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i>(d) { } VecProductQuantizer_4_AVX512() : /* dimensionality of the input vectors */ VecProductQuantizer_NoVec<TT_M,2,4,4,0,0,int8_t,__m128i,__m512i>() { } }; #endif template<int TT_M> struct VecProductQuantizer_4_SSE128 : public AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m128i> { typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m128i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m128i>::VPQQuant VPQQuant; VecProductQuantizer_4_SSE128(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m128i>(d) { } VecProductQuantizer_4_SSE128() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m128i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m128i *__restrict mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { const std::uint64_t low_mask = 0x0f0f0f0f0f0f0f0f; const __m128i low_mask_sse = _mm_set_epi64x(low_mask, low_mask); // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>::codes_per_group; constexpr int lanes_per_code = 
AbstractVecProductQuantizer<TT_M,2,4,4,0,0,int8_t,__m128i,__m256i>::lanes_per_code; // Binheap extraction int8_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m128i bh_bound_sse = _mm_set1_epi8(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { // Subquantizer 0 const __m128i comps_01 = _mm_load_si128(&codes[j].mmxl[0]); const __m128i comps_0 = _mm_and_si128(comps_01, low_mask_sse); __m128i candidates = _mm_shuffle_epi8(mm_dis_tables[0], comps_0); // Subquantizer 1 const __m128i comps_01_shift = _mm_srli_epi64(comps_01, 4); const __m128i comps_1 = _mm_and_si128(comps_01_shift, low_mask_sse); const __m128i partial = _mm_shuffle_epi8(mm_dis_tables[1], comps_1); candidates = _mm_adds_epi8(candidates, partial); // Subquantizers 2..SQ_COUNT for(int row_i = 1; row_i < lanes_per_code; ++row_i) { const int sq_i = row_i * 2; const __m128i comps = _mm_loadu_si128(&codes[j].mmxl[row_i]); // Low comps const __m128i comps_low = _mm_and_si128(comps, low_mask_sse); const __m128i partial_low = _mm_shuffle_epi8(mm_dis_tables[sq_i], comps_low); candidates = _mm_adds_epi8(candidates, partial_low); // High comps const __m128i comps_shift = _mm_srli_epi64(comps, 4); const __m128i comps_high = _mm_and_si128(comps_shift, low_mask_sse); const __m128i partial_high = _mm_shuffle_epi8(mm_dis_tables[sq_i + 1],comps_high); candidates = _mm_adds_epi8(candidates, partial_high); } // Compare const __m128i compare = _mm_cmplt_epi8(candidates, bh_bound_sse); int cmp = _mm_movemask_epi8(compare); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val128(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_sse); } //compare_extract_matches_sse(candidates, bh_bound_sse, scanned, max_scan, // candidates_mem, bh, labels, labels_offset); } }; }; template<int TT_M> struct VecProductQuantizer_4_SSE128_unsigned : public AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m128i> { typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m128i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m128i>::VPQQuant VPQQuant; VecProductQuantizer_4_SSE128_unsigned(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m128i>(d) { } VecProductQuantizer_4_SSE128_unsigned() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m128i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m128i *__restrict mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { const std::uint64_t low_mask = 0x0f0f0f0f0f0f0f0f; const __m128i low_mask_sse = _mm_set_epi64x(low_mask, low_mask); // Access static member of 
super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,2,4,4,0,0,uint8_t,__m128i,__m256i>::lanes_per_code; // Binheap extraction uint8_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m128i bh_bound_sse = _mm_set1_epi8(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m128i candidates = _mm_setzero_si128(); // Subquantizers 0..SQ_COUNT for(int row_i = 0; row_i < lanes_per_code; ++row_i) { const int sq_i = row_i * 2; const __m128i comps = _mm_loadu_si128(&codes[j].mmxl[row_i]); // Low comps const __m128i comps_low = _mm_and_si128(comps, low_mask_sse); const __m128i partial_low = _mm_shuffle_epi8(mm_dis_tables[sq_i], comps_low); candidates = _mm_adds_epu8(candidates, partial_low); // High comps const __m128i comps_shift = _mm_srli_epi64(comps, 4); const __m128i comps_high = _mm_and_si128(comps_shift, low_mask_sse); const __m128i partial_high = _mm_shuffle_epi8(mm_dis_tables[sq_i + 1],comps_high); candidates = _mm_adds_epu8(candidates, partial_high); } // Compare const __m128i compare = _mm_cmplt_epu8(candidates, bh_bound_sse); int cmp = _mm_movemask_epi8(compare); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val128(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_sse); } //compare_extract_matches_sse(candidates, bh_bound_sse, scanned, max_scan, // candidates_mem, bh, labels, labels_offset); } }; }; #ifdef __AVX512F__ template<int TT_M, int T_PG_1, int T_PG_2, int T_PG_3> struct VecProductQuantizer_XYZ_AVX512 : public AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i> { typedef typename AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i>::VPQQuant VPQQuant; VecProductQuantizer_XYZ_AVX512(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i>(d) { } VecProductQuantizer_XYZ_AVX512() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m512i *__restrict mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i>::codes_per_group; constexpr int lanes_per_code = 
AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i>::lanes_per_code; // Binheap extraction int16_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m512i bh_bound_av512 = _mm512_set1_epi16(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m512i candidates = _mm512_setzero_si512(); // Subquantizers 0..SQ_COUNT for(int row_i = 0; row_i < lanes_per_code; ++row_i) { const int offset_1 = 1 + (T_PG_1 == 6); const int offset_2 = 1 + (T_PG_2 == 6) + offset_1; const int offset_3 = 1 + (T_PG_3 == 6) + offset_2; const int sqdt_i = row_i * (offset_3); __m512i partiala, partialb, partialc; // Subquantizer 0 const __m512i comps_012 = _mm512_loadu_si512(&codes[j].mmxl[row_i]); if(T_PG_1 == 6){ partiala = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+0], comps_012, mm_dis_tables[sqdt_i+1]); }else{ partiala = _mm512_permutexvar_epi16(comps_012,mm_dis_tables[sqdt_i+0]); } candidates = _mm512_adds_epi16(candidates, partiala); // Subquantizer 1 const __m512i comps_12 = _mm512_srli_epi16(comps_012, this->csub_offset_inlane[1]); if(T_PG_2 ==6){ partialb = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+offset_1], comps_12, mm_dis_tables[sqdt_i+offset_1+1]); }else{ partialb = _mm512_permutexvar_epi16(comps_12,mm_dis_tables[sqdt_i+offset_1]); } candidates = _mm512_adds_epi16(candidates, partialb); // Subquantizer 2 const __m512i comps_2 = _mm512_srli_epi16(comps_012, this->csub_offset_inlane[2]); if(T_PG_3 ==6){ partialc = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+offset_2], comps_2, mm_dis_tables[sqdt_i+offset_2+1]); }else{ partialc = _mm512_permutexvar_epi16(comps_2,mm_dis_tables[sqdt_i+offset_2]); } candidates = _mm512_adds_epi16(candidates, partialc); } // Compare __mmask32 cmp = _mm512_cmplt_epi16_mask(candidates, bh_bound_av512); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val_loop(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_av512); } } }; }; #else template<int TT_M, int T_PG_1, int T_PG_2, int T_PG_3> struct VecProductQuantizer_XYZ_AVX512 : public VecProductQuantizer_NoVecTable<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i> { VecProductQuantizer_XYZ_AVX512(size_t d) : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i>(d) { } VecProductQuantizer_XYZ_AVX512() : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,int16_t,__m512i,__m512i>() { } }; #endif #ifdef __AVX512F__ // Warning, when using this template, PG_1 and PG_2 must be either 5 or 6. A 4 must come last. 
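// Added sketch of the lookup primitive used by the XYZ kernels: VPERMW treats
// one ZMM register as a 32-entry table of 16-bit values, each 16-bit lane of
// `indices` selecting one entry (only its low 5 bits are used). 6-bit codes
// pair two table registers via _mm512_permutex2var_epi16 instead, hence the
// PG_1/PG_2 constraints stated above.
static inline __m512i vpermw_lookup_sketch(__m512i indices, __m512i table) {
    return _mm512_permutexvar_epi16(indices, table); // 32 parallel 16-bit lookups
}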
template<int TT_M, int T_PG_1, int T_PG_2, int T_PG_3> struct VecProductQuantizer_XYZ_AVX512_unsigned : public AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i> { typedef typename AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i>::VPQQuant VPQQuant; VecProductQuantizer_XYZ_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i>(d) { FAISS_THROW_IF_NOT(T_PG_1 >= 5 && T_PG_2 >= 5 && T_PG_1 <= 6 && T_PG_2 <= 6 && T_PG_3 <= 6); } VecProductQuantizer_XYZ_AVX512_unsigned() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i>() { FAISS_THROW_IF_NOT(T_PG_1 >= 5 && T_PG_2 >= 5 && T_PG_1 <= 6 && T_PG_2 <= 6 && T_PG_3 <= 6); } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m512i *__restrict mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i>::lanes_per_code; // Binheap extraction uint16_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m512i bh_bound_av512 = _mm512_set1_epi16(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m512i candidates = _mm512_setzero_si512(); // Subquantizers 0..SQ_COUNT for(int row_i = 0; row_i < lanes_per_code; ++row_i) { const int offset_1 = 1 + (T_PG_1 == 6); const int offset_2 = 1 + (T_PG_2 == 6) + offset_1; const int offset_3 = 1 + (T_PG_3 == 6) + offset_2; const int sqdt_i = row_i * (offset_3); __m512i partiala, partialb, partialc; // Subquantizer 0 const __m512i comps_012 = _mm512_loadu_si512(&codes[j].mmxl[row_i]); if(T_PG_1 == 6){ partiala = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+0], comps_012, mm_dis_tables[sqdt_i+1]); }else{ partiala = _mm512_permutexvar_epi16(comps_012,mm_dis_tables[sqdt_i+0]); } candidates = _mm512_adds_epu16(candidates, partiala); // Subquantizer 1 const __m512i comps_12 = _mm512_srli_epi16(comps_012, this->csub_offset_inlane[1]); if(T_PG_2 ==6){ partialb = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+offset_1], comps_12, mm_dis_tables[sqdt_i+offset_1+1]); }else{ partialb = _mm512_permutexvar_epi16(comps_12,mm_dis_tables[sqdt_i+offset_1]); } candidates = _mm512_adds_epu16(candidates, partialb); // Subquantizer 2 const __m512i comps_2 = _mm512_srli_epi16(comps_012, this->csub_offset_inlane[2]); if(T_PG_3 ==6){ partialc = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+offset_2], comps_2, 
mm_dis_tables[sqdt_i+offset_2+1]); }else{ partialc = _mm512_permutexvar_epi16(comps_2,mm_dis_tables[sqdt_i+offset_2]); } candidates = _mm512_adds_epu16(candidates, partialc); } // Compare __mmask32 cmp = _mm512_cmplt_epu16_mask(candidates, bh_bound_av512); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val_loop(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_av512); } } }; }; #else template<int TT_M, int T_PG_1, int T_PG_2, int T_PG_3> struct VecProductQuantizer_XYZ_AVX512_unsigned : public VecProductQuantizer_NoVecTable<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i> { VecProductQuantizer_XYZ_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i>(d) { } VecProductQuantizer_XYZ_AVX512_unsigned() : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,3,T_PG_1,T_PG_2,T_PG_3,0,uint16_t,__m512i,__m512i>() { } }; #endif #ifdef __AVX512F__ template<int TT_M> struct VecProductQuantizer_4444_AVX512_unsigned : public AbstractVecProductQuantizer<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i> { typedef typename AbstractVecProductQuantizer<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i>::VPQQuant VPQQuant; VecProductQuantizer_4444_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i>(d) { } VecProductQuantizer_4444_AVX512_unsigned() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m512i *__restrict mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i>::lanes_per_code; // Mask __m512i mask_4bit=_mm512_set1_epi16(0x000f); // Binheap extraction uint16_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m512i bh_bound_av512 = _mm512_set1_epi16(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m512i candidates = _mm512_setzero_si512(); // Subquantizers 0..SQ_COUNT for(int row_i = 0; row_i < lanes_per_code; ++row_i) { const int offset_1 = 1; const int offset_2 = 1 + offset_1; const int offset_3 = 1 + offset_2; const int offset_4 = 1 + offset_3; const int sqdt_i = row_i * (offset_4); __m512i partiala, partialb, partialc, partiald; // Subquantizer 0 const 
__m512i comps_012 = _mm512_loadu_si512(&codes[j].mmxl[row_i]); const __m512i comps_0 = _mm512_and_si512(comps_012,mask_4bit); partiala = _mm512_permutexvar_epi16(comps_0,mm_dis_tables[sqdt_i+0]); candidates = _mm512_adds_epu16(candidates, partiala); // Subquantizer 1 const __m512i comps_1x = _mm512_srli_epi16(comps_012, this->csub_offset_inlane[1]); const __m512i comps_1 = _mm512_and_si512(comps_1x,mask_4bit); partialb = _mm512_permutexvar_epi16(comps_1,mm_dis_tables[sqdt_i+offset_1]); candidates = _mm512_adds_epu16(candidates, partialb); // Subquantizer 2 const __m512i comps_2x = _mm512_srli_epi16(comps_012, this->csub_offset_inlane[2]); const __m512i comps_2 = _mm512_and_si512(comps_2x,mask_4bit); partialc = _mm512_permutexvar_epi16(comps_2,mm_dis_tables[sqdt_i+offset_2]); candidates = _mm512_adds_epu16(candidates, partialc); // Subquantizer 3 const __m512i comps_3 = _mm512_srli_epi16(comps_012, this->csub_offset_inlane[3]); //const __m512i comps_3 = _mm512_and_si512(comps_3x,mask_4bit); Not Needed as after last shift no bits remains partiald = _mm512_permutexvar_epi16(comps_3,mm_dis_tables[sqdt_i+offset_3]); candidates = _mm512_adds_epu16(candidates, partiald); } // Compare __mmask32 cmp = _mm512_cmplt_epu16_mask(candidates, bh_bound_av512); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val_loop(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_av512); } } }; }; #else template<int TT_M> struct VecProductQuantizer_4444_AVX512_unsigned : public VecProductQuantizer_NoVecTable<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i> { VecProductQuantizer_4444_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i>(d) { } VecProductQuantizer_4444_AVX512_unsigned() : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,4,4,4,4,4,uint16_t,__m512i,__m512i>() { } }; #endif #ifdef __AVX512F__ template<int TT_M> struct VecProductQuantizer_88_AVX512_unsigned : public AbstractVecProductQuantizer<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i> { typedef typename AbstractVecProductQuantizer<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i>::VPQQuant VPQQuant; VecProductQuantizer_88_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i>(d) { } VecProductQuantizer_88_AVX512_unsigned() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m512i *__restrict mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i>::lanes_per_code; // Binheap extraction uint16_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); 
__m512i bh_bound_av512 = _mm512_set1_epi16(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m512i candidates = _mm512_setzero_si512(); // Subquantizers 0..SQ_COUNT for(int row_i = 0; row_i < lanes_per_code; ++row_i) { const int sqdt_i = row_i * 16; const __m512i comps_01 = _mm512_loadu_si512(&codes[j].mmxl[row_i]); /* First component */ const __m512i bit7_0 = _mm512_slli_epi16(comps_01,9); const __mmask32 bit7_0_m = _mm512_movepi16_mask(bit7_0); const __m512i bit8_0 = _mm512_slli_epi16(comps_01,8); const __mmask32 bit8_0_m = _mm512_movepi16_mask(bit8_0); const __m512i partial_0_00 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+0], comps_01, mm_dis_tables[sqdt_i+1]); const __m512i partial_0_01 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+2], comps_01, mm_dis_tables[sqdt_i+3]); const __m512i partial_0_0 = _mm512_mask_blend_epi16(bit7_0_m,partial_0_00,partial_0_01); const __m512i partial_0_10 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+4], comps_01, mm_dis_tables[sqdt_i+5]); const __m512i partial_0_11 =_mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+6], comps_01, mm_dis_tables[sqdt_i+7]); const __m512i partial_0_1 = _mm512_mask_blend_epi16(bit7_0_m,partial_0_10,partial_0_11); const __m512i partial_0 = _mm512_mask_blend_epi16(bit8_0_m,partial_0_0,partial_0_1); /* Second component */ const __m512i comps_1 = _mm512_srli_epi16(comps_01, 8); const __m512i bit7_1 = _mm512_slli_epi16(comps_01,1); const __mmask32 bit7_1_m = _mm512_movepi16_mask(bit7_1); const __m512i bit8_1 = comps_01; const __mmask32 bit8_1_m = _mm512_movepi16_mask(bit8_1); const __m512i partial_1_00 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+8], comps_1, mm_dis_tables[sqdt_i+9]); const __m512i partial_1_01 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+10], comps_1, mm_dis_tables[sqdt_i+11]); const __m512i partial_1_0 = _mm512_mask_blend_epi16(bit7_1_m,partial_1_00,partial_1_01); const __m512i partial_1_10 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+12], comps_1, mm_dis_tables[sqdt_i+13]); const __m512i partial_1_11 =_mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+14], comps_1, mm_dis_tables[sqdt_i+15]); const __m512i partial_1_1 = _mm512_mask_blend_epi16(bit7_1_m,partial_1_10,partial_1_11); const __m512i partial_1 = _mm512_mask_blend_epi16(bit8_1_m,partial_1_0,partial_1_1); const __m512i partial_sum = _mm512_adds_epu16(partial_0, partial_1); candidates = _mm512_adds_epu16(candidates, partial_sum); } // Compare __mmask32 cmp = _mm512_cmplt_epu16_mask(candidates, bh_bound_av512); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val_loop(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_av512); } } }; }; #else template<int TT_M> struct VecProductQuantizer_88_AVX512_unsigned : public VecProductQuantizer_NoVecTable<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i> { VecProductQuantizer_88_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ 
VecProductQuantizer_NoVecTable<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i>(d) { } VecProductQuantizer_88_AVX512_unsigned() : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,2,8,8,0,0,uint16_t,__m512i,__m512i>() { } }; #endif #ifdef __AVX512F__ template<int TT_M> struct VecProductQuantizer_88_AVX512 : public AbstractVecProductQuantizer<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i> { typedef typename AbstractVecProductQuantizer<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i>::VPQQuant VPQQuant; VecProductQuantizer_88_AVX512(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i>(d) { } VecProductQuantizer_88_AVX512() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m512i *__restrict mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i>::lanes_per_code; // Binheap extraction int16_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m512i bh_bound_av512 = _mm512_set1_epi16(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = (offset+ncodes-1) % codes_per_group; int start_mask=~((1 << start_index_in_group)-1); //2 => 0b0000011 => 0b11111100; int end_mask=((1 <<(last_index_in_group+1)) -1); //2 => 3 => 0b0000111 for (size_t j = start_group_index; j <= last_group_index; j++) { __m512i candidates = _mm512_setzero_si512(); // Subquantizers 0..SQ_COUNT for(int row_i = 0; row_i < lanes_per_code; ++row_i) { const int sqdt_i = row_i * 16; const __m512i comps_01 = _mm512_loadu_si512(&codes[j].mmxl[row_i]); /* First component */ const __m512i bit7_0 = _mm512_slli_epi16(comps_01,9); const __mmask32 bit7_0_m = _mm512_movepi16_mask(bit7_0); const __m512i bit8_0 = _mm512_slli_epi16(comps_01,8); const __mmask32 bit8_0_m = _mm512_movepi16_mask(bit8_0); const __m512i partial_0_00 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+0], comps_01, mm_dis_tables[sqdt_i+1]); const __m512i partial_0_01 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+2], comps_01, mm_dis_tables[sqdt_i+3]); const __m512i partial_0_0 = _mm512_mask_blend_epi16(bit7_0_m,partial_0_00,partial_0_01); const __m512i partial_0_10 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+4], comps_01, mm_dis_tables[sqdt_i+5]); const __m512i partial_0_11 =_mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+6], comps_01, mm_dis_tables[sqdt_i+7]); const __m512i partial_0_1 = _mm512_mask_blend_epi16(bit7_0_m,partial_0_10,partial_0_11); const __m512i partial_0 = _mm512_mask_blend_epi16(bit8_0_m,partial_0_0,partial_0_1); /* Second component */ const __m512i comps_1 = _mm512_srli_epi16(comps_01, 8); const __m512i bit7_1 = _mm512_slli_epi16(comps_01,1); const __mmask32 bit7_1_m = 
_mm512_movepi16_mask(bit7_1); const __m512i bit8_1 = comps_01; const __mmask32 bit8_1_m = _mm512_movepi16_mask(bit8_1); const __m512i partial_1_00 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+8], comps_1, mm_dis_tables[sqdt_i+9]); const __m512i partial_1_01 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+10], comps_1, mm_dis_tables[sqdt_i+11]); const __m512i partial_1_0 = _mm512_mask_blend_epi16(bit7_1_m,partial_1_00,partial_1_01); const __m512i partial_1_10 = _mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+12], comps_1, mm_dis_tables[sqdt_i+13]); const __m512i partial_1_11 =_mm512_permutex2var_epi16(mm_dis_tables[sqdt_i+14], comps_1, mm_dis_tables[sqdt_i+15]); const __m512i partial_1_1 = _mm512_mask_blend_epi16(bit7_1_m,partial_1_10,partial_1_11); const __m512i partial_1 = _mm512_mask_blend_epi16(bit8_1_m,partial_1_0,partial_1_1); const __m512i partial_sum = _mm512_adds_epi16(partial_0, partial_1); candidates = _mm512_adds_epi16(candidates, partial_sum); } // Compare __mmask32 cmp = _mm512_cmplt_epi16_mask(candidates, bh_bound_av512); // Apply masks for potentially incomplete first and last groups if(unlikely(j == start_group_index)){ cmp &= start_mask; } if(unlikely(j == last_group_index)){ cmp &= end_mask; } if(cmp){ this->extract_val_loop(cmp, candidates, j, store_pairs, key, list_ids, dis0, k, heap_dis, heap_ids, qmax, bh_bound_av512); } } }; }; #else template<int TT_M> struct VecProductQuantizer_88_AVX512 : public VecProductQuantizer_NoVecTable<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i> { VecProductQuantizer_88_AVX512(size_t d) : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i>(d) { } VecProductQuantizer_88_AVX512() : /* dimensionality of the input vectors */ VecProductQuantizer_NoVecTable<TT_M,2,8,8,0,0,int16_t,__m512i,__m512i>() { } }; #endif #ifdef __AVX512VBMI__ template<int TT_M> struct VecProductQuantizer_8_AVX512_unsigned : public AbstractVecProductQuantizer<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i> { typedef typename AbstractVecProductQuantizer<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i>::group group; typedef typename AbstractVecProductQuantizer<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i>::VPQQuant VPQQuant; VecProductQuantizer_8_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i>(d) { } VecProductQuantizer_8_AVX512_unsigned() : /* dimensionality of the input vectors */ AbstractVecProductQuantizer<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i>() { } //__attribute__((optimize("unroll-loops"))) inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset, const group * __restrict codes, const float * __restrict dis_table, __m512i *__restrict mm_dis_tables, VPQQuant * qmax, int k, float * __restrict heap_dis, long* __restrict heap_ids, float dis0, long key, const long* list_ids, bool store_pairs) const override { // Access static member of super class (shortcut) constexpr int codes_per_group = AbstractVecProductQuantizer<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i>::codes_per_group; constexpr int lanes_per_code = AbstractVecProductQuantizer<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i>::lanes_per_code; // Binheap extraction uint8_t bh_bound_quant; qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant); __m512i bh_bound_av512 = _mm512_set1_epi8(bh_bound_quant); int start_group_index = offset / codes_per_group; int start_index_in_group = offset % codes_per_group; int last_group_index = (offset+ncodes-1)/codes_per_group; int last_index_in_group = 
                                    (offset+ncodes-1) % codes_per_group;
        // 8-bit codes pack up to 64 codes per __m512i group, so the group
        // masks must be 64-bit (e.g. start=2 => ...11111100, last=2 => 00000111)
        uint64_t start_mask = ~0ULL << start_index_in_group;
        uint64_t end_mask = ~0ULL >> (63 - last_index_in_group);
        for (size_t j = start_group_index; j <= last_group_index; j++) {
            __m512i candidates = _mm512_setzero_si512();
            // Subquantizers 0..SQ_COUNT
            for (int row_i = 0; row_i < lanes_per_code; ++row_i) {
                const int sqdt_i = row_i * 4;
                const __m512i comps = _mm512_loadu_si512(&codes[j].mmxl[row_i]);
                const __mmask64 bit8_m = _mm512_movepi8_mask(comps);
                const __m512i partial_0 = _mm512_permutex2var_epi8(mm_dis_tables[sqdt_i+0], comps,
                                                                   mm_dis_tables[sqdt_i+1]);
                const __m512i partial_1 = _mm512_permutex2var_epi8(mm_dis_tables[sqdt_i+2], comps,
                                                                   mm_dis_tables[sqdt_i+3]);
                const __m512i partial_sum = _mm512_mask_blend_epi8(bit8_m, partial_0, partial_1);
                candidates = _mm512_adds_epu8(candidates, partial_sum);
            }
            // Compare; an epi8 comparison over 512 bits yields a 64-bit mask
            __mmask64 cmp = _mm512_cmplt_epu8_mask(candidates, bh_bound_av512);
            // Apply masks for potentially incomplete first and last groups
            if (unlikely(j == start_group_index)) { cmp &= start_mask; }
            if (unlikely(j == last_group_index)) { cmp &= end_mask; }
            if (cmp) {
                this->extract_val_loop(cmp, candidates, j, store_pairs, key, list_ids, dis0,
                                       k, heap_dis, heap_ids, qmax, bh_bound_av512);
            }
        }
    };
};
#else
template<int TT_M>
struct VecProductQuantizer_8_AVX512_unsigned :
        public VecProductQuantizer_NoVecTable<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i> {
    VecProductQuantizer_8_AVX512_unsigned(size_t d) : /* dimensionality of the input vectors */
        VecProductQuantizer_NoVecTable<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i>(d) { }
    VecProductQuantizer_8_AVX512_unsigned() :
        VecProductQuantizer_NoVecTable<TT_M,1,8,0,0,0,uint8_t,__m512i,__m512i>() { }
};
#endif

#ifdef __AVX512VBMI__
template<int TT_M>
struct VecProductQuantizer_8_AVX512 :
        public AbstractVecProductQuantizer<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i> {
    typedef typename AbstractVecProductQuantizer<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i>::group group;
    typedef typename AbstractVecProductQuantizer<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i>::VPQQuant VPQQuant;

    VecProductQuantizer_8_AVX512(size_t d) : /* dimensionality of the input vectors */
        AbstractVecProductQuantizer<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i>(d) { }
    VecProductQuantizer_8_AVX512() :
        AbstractVecProductQuantizer<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i>() { }

    //__attribute__((optimize("unroll-loops")))
    inline void lookup_and_update_heap_simd(size_t ncodes, size_t offset,
            const group * __restrict codes, const float * __restrict dis_table,
            __m512i *__restrict mm_dis_tables, VPQQuant * qmax, int k,
            float * __restrict heap_dis, long* __restrict heap_ids, float dis0,
            long key, const long* list_ids, bool store_pairs) const override {

        // Access static members of the super class (shortcut)
        constexpr int codes_per_group =
            AbstractVecProductQuantizer<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i>::codes_per_group;
        constexpr int lanes_per_code =
            AbstractVecProductQuantizer<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i>::lanes_per_code;

        // Binheap extraction
        int8_t bh_bound_quant;
        qmax->quantize_sum(heap_dis[0]-dis0, &bh_bound_quant);
        __m512i bh_bound_av512 = _mm512_set1_epi8(bh_bound_quant);

        int start_group_index = offset / codes_per_group;
        int start_index_in_group = offset % codes_per_group;
        int last_group_index = (offset+ncodes-1)/codes_per_group;
        int last_index_in_group = (offset+ncodes-1) % codes_per_group;
        // 64-bit group masks, as in the unsigned variant above
        uint64_t start_mask = ~0ULL << start_index_in_group;
        uint64_t end_mask = ~0ULL >> (63 - last_index_in_group);

        for (size_t j = start_group_index; j <= last_group_index; j++) {
            __m512i candidates = _mm512_setzero_si512();
            // Subquantizers 0..SQ_COUNT
            for (int row_i = 0; row_i < lanes_per_code; ++row_i) {
                const int sqdt_i = row_i * 4;
                const __m512i comps = _mm512_loadu_si512(&codes[j].mmxl[row_i]);
                const __mmask64 bit8_m = _mm512_movepi8_mask(comps);
                const __m512i partial_0 = _mm512_permutex2var_epi8(mm_dis_tables[sqdt_i+0], comps,
                                                                   mm_dis_tables[sqdt_i+1]);
                const __m512i partial_1 = _mm512_permutex2var_epi8(mm_dis_tables[sqdt_i+2], comps,
                                                                   mm_dis_tables[sqdt_i+3]);
                const __m512i partial_sum = _mm512_mask_blend_epi8(bit8_m, partial_0, partial_1);
                candidates = _mm512_adds_epi8(candidates, partial_sum);
            }
            // Compare; an epi8 comparison over 512 bits yields a 64-bit mask
            __mmask64 cmp = _mm512_cmplt_epi8_mask(candidates, bh_bound_av512);
            // Apply masks for potentially incomplete first and last groups
            if (unlikely(j == start_group_index)) { cmp &= start_mask; }
            if (unlikely(j == last_group_index)) { cmp &= end_mask; }
            if (cmp) {
                this->extract_val_loop(cmp, candidates, j, store_pairs, key, list_ids, dis0,
                                       k, heap_dis, heap_ids, qmax, bh_bound_av512);
            }
        }
    };
};
#else
template<int TT_M>
struct VecProductQuantizer_8_AVX512 :
        public VecProductQuantizer_NoVecTable<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i> {
    VecProductQuantizer_8_AVX512(size_t d) : /* dimensionality of the input vectors */
        VecProductQuantizer_NoVecTable<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i>(d) { }
    VecProductQuantizer_8_AVX512() :
        VecProductQuantizer_NoVecTable<TT_M,1,8,0,0,0,int8_t,__m512i,__m512i>() { }
};
#endif

/** Applies a rotation to align the dimensions with a PQ to minimize
 * the reconstruction error. Can be used before an IndexPQ or an
 * IndexIVFPQ. The method is the non-parametric version described in:
 *
 * "Optimized Product Quantization for Approximate Nearest Neighbor Search"
 * Tiezheng Ge, Kaiming He, Qifa Ke, Jian Sun, CVPR'13
 */
template <class T_VPQ>
struct OVPQMatrix: LinearTransform {
#ifdef ECLIPSE
    typedef VecProductQuantizer_4_AVX256<16> T_VPQ;
#endif

    int niter;          ///< Number of outer training iterations
    int niter_pq;       ///< Number of training iterations for the PQ
    int niter_pq_0;     ///< same, for the first outer iteration

    /// if there are too many training points, resample
    size_t max_train_points;
    bool verbose;

    /// if d2 != -1, output vectors of this dimension
    explicit OVPQMatrix (int d = 0, int d2 = -1) :
        LinearTransform (d, d2 == -1 ? d : d2, false),
        niter (50), niter_pq (4), niter_pq_0 (40),
        verbose(false)
    {
        is_trained = false;
        // OPQ is quite expensive to train, so cap the number of training points.
        max_train_points = 256 * 256;
    }

    void train(Index::idx_t n, const float* x) override {
        const float * x_in = x;
        x = fvecs_maybe_subsample (d_in, (size_t*)&n, max_train_points, x, verbose);
        ScopeDeleter<float> del_x (x != x_in ? x : nullptr);

        // To support d_out > d_in, we pad input vectors with 0s to d_out
        size_t d = d_out <= d_in ? d_in : d_out;
        size_t d2 = d_out;

#if 0
        // What this test shows: the only way of getting bit-exact
        // reproducible results with sgeqrf and sgesvd seems to be forcing
        // single-threading.
{ // test repro std::vector<float> r (d * d); float * rotation = r.data(); float_randn (rotation, d * d, 1234); printf("CS0: %016lx\n", ivec_checksum (128*128, (int*)rotation)); matrix_qr (d, d, rotation); printf("CS1: %016lx\n", ivec_checksum (128*128, (int*)rotation)); return; } #endif if (verbose) { printf ("OPQMatrix::train: training an OPQ rotation matrix " "from %ld vectors in %dD -> %dD\n", n, d_in, d_out); } std::vector<float> xtrain (n * d); // center x { std::vector<float> sum (d); const float *xi = x; for (size_t i = 0; i < n; i++) { for (int j = 0; j < d_in; j++) sum [j] += *xi++; } for (int i = 0; i < d; i++) sum[i] /= n; float *yi = xtrain.data(); xi = x; for (size_t i = 0; i < n; i++) { for (int j = 0; j < d_in; j++) *yi++ = *xi++ - sum[j]; yi += d - d_in; } } float *rotation; if (A.size () == 0) { A.resize (d * d); rotation = A.data(); if (verbose) printf(" OPQMatrix::train: making random %ld*%ld rotation\n", d, d); float_randn (rotation, d * d, 1234); matrix_qr (d, d, rotation); // we use only the d * d2 upper part of the matrix A.resize (d * d2); } else { FAISS_THROW_IF_NOT (A.size() == d * d2); rotation = A.data(); } std::vector<float> xproj (d2 * n), pq_recons (d2 * n), xxr (d * n), tmp(d * d * 4); T_VPQ pq_regular (d2); std::vector<typename T_VPQ::group, boost::alignment::aligned_allocator<typename T_VPQ::group, 64>> codes (pq_regular.nb_groups(n)); double t0 = getmillisecs(); for (int iter = 0; iter < niter; iter++) { { // torch.mm(xtrain, rotation:t()) FINTEGER di = d, d2i = d2, ni = n; float zero = 0, one = 1; sgemm_ ("Transposed", "Not transposed", &d2i, &ni, &di, &one, rotation, &di, xtrain.data(), &di, &zero, xproj.data(), &d2i); } pq_regular.cp.max_points_per_centroid = 1000; pq_regular.cp.niter = iter == 0 ? niter_pq_0 : niter_pq; pq_regular.cp.verbose = verbose; pq_regular.train (n, xproj.data()); // Encoding/decoding is more expensive with VPQ due to the SIMD oriented layout. // FIXME : add a method to access directly the quantization error rather than doing it indirectly. pq_regular.encode_multiple (xproj.data(), codes.data(), 0, n); pq_regular.decode_multiple(codes.data(), pq_recons.data(), 0, n); float pq_err = fvec_L2sqr (pq_recons.data(), xproj.data(), n * d2) / n; if (verbose) printf (" Iteration %d (%d PQ iterations):" "%.3f s, obj=%g\n", iter, pq_regular.cp.niter, (getmillisecs () - t0) / 1000.0, pq_err); { float *u = tmp.data(), *vt = &tmp [d * d]; float *sing_val = &tmp [2 * d * d]; FINTEGER di = d, d2i = d2, ni = n; float one = 1, zero = 0; // torch.mm(xtrain:t(), pq_recons) sgemm_ ("Not", "Transposed", &d2i, &di, &ni, &one, pq_recons.data(), &d2i, xtrain.data(), &di, &zero, xxr.data(), &d2i); FINTEGER lwork = -1, info = -1; float worksz; // workspace query sgesvd_ ("All", "All", &d2i, &di, xxr.data(), &d2i, sing_val, vt, &d2i, u, &di, &worksz, &lwork, &info); lwork = int(worksz); std::vector<float> work (lwork); // u and vt swapped sgesvd_ ("All", "All", &d2i, &di, xxr.data(), &d2i, sing_val, vt, &d2i, u, &di, work.data(), &lwork, &info); sgemm_ ("Transposed", "Transposed", &di, &d2i, &d2i, &one, u, &di, vt, &d2i, &zero, rotation, &di); } pq_regular.train_type = T_VPQ::Train_hot_start; } // revert A matrix if (d > d_in) { for (long i = 0; i < d_out; i++) memmove (&A[i * d_in], &A[i * d], sizeof(A[0]) * d_in); A.resize (d_in * d_out); } is_trained = true; is_orthonormal = true; } }; template <class T> inline std::string tc_vpq(const T* n){return "_";} } // namespace faiss #endif
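The SVD step in `OVPQMatrix::train` above is the orthogonal Procrustes update of non-parametric OPQ: with the PQ codes held fixed, the best orthogonal rotation between the training set and its reconstruction has a closed form. Writing X for the n-by-d centered training matrix (rows are points) and X-hat for its PQ reconstruction, the update computed by the `sgemm_`/`sgesvd_` pair is, up to the row-major versus Fortran transposition conventions mixed in the code above:

\[
X^\top \hat{X} = U \Sigma V^\top
\qquad\Longrightarrow\qquad
R^{*} = \arg\min_{R^\top R = I} \lVert X R - \hat{X} \rVert_F^2 = U V^\top .
\]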
hello-openmp.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
    int nthreads, tid;

    /* Fork a team of threads; each thread gets its own copies of nthreads and tid */
    #pragma omp parallel private(nthreads, tid)
    {
        /* Obtain thread number */
        tid = omp_get_thread_num();
        printf("Hello World from OpenMP thread = %d\n", tid);

        /* Only the master thread does this */
        if (tid == 0) {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    } /* All threads join the master thread and disband */

    return 0;
}
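For reference, a minimal variant of the same program (an illustrative sketch, not part of the original corpus) that reports the team size from exactly one thread via the standard `single` construct instead of testing the thread id. Both versions compile with e.g. `gcc -fopenmp` and honor `OMP_NUM_THREADS`.

#include <omp.h>
#include <stdio.h>

int main(void)
{
    #pragma omp parallel
    {
        /* exactly one thread of the team executes this block */
        #pragma omp single
        printf("Team size = %d\n", omp_get_num_threads());

        printf("Hello from thread %d\n", omp_get_thread_num());
    }
    return 0;
}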
magnitude.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <math.h>
#include "parmt_config.h"
#include "compearth.h"
#ifdef PARMT_USE_INTEL
#include <mkl.h>
#include <mkl_lapacke.h>
#include <mkl_cblas.h>
#include <ipps.h>
#else
#include <lapacke.h>
#include <cblas.h>
#endif
#include "iscl/array/array.h"
#include "iscl/linalg/linalg.h"
#include "iscl/memory/memory.h"
#include "iscl/statistics/statistics.h"
#include "iscl/sorting/sorting.h"

static double median_sorted_array(int n, const double *__restrict x);
static int setG64f(const int npts,
                   const double *__restrict__ Gxx,
                   const double *__restrict__ Gyy,
                   const double *__restrict__ Gzz,
                   const double *__restrict__ Gxy,
                   const double *__restrict__ Gxz,
                   const double *__restrict__ Gyz,
                   double *__restrict__ G);

#define LDG 8

int parmt_computeL1Magnitude64f(const int ldm, const int nobs,
                                const int nmtSolve,
                                const int maxit, const double eps,
                                const double tol,
                                const int *__restrict__ signalPtr,
                                const int *__restrict__ lags,
                                const int *__restrict__ mtPtr,
                                const double *__restrict__ Gxx,
                                const double *__restrict__ Gyy,
                                const double *__restrict__ Gzz,
                                const double *__restrict__ Gxy,
                                const double *__restrict__ Gxz,
                                const double *__restrict__ Gyz,
                                const double *__restrict__ mts,
                                const double *__restrict__ d,
                                double *__restrict__ mags)
{
    const char *fcnm = "parmt_computeL1Magnitude64f\0";
    double *G, *est, *obs, *wts, xmag;
    double m6[8] __attribute__((aligned(64)));
    int i, i1, i2, ierr, imt, jmt, k, maxlag, nptsAll, nptsPad;
    bool luseLag;
    const double p = 1.0;
    const double one = 1.0;
    const double zero = 0.0;
    nptsAll = signalPtr[nobs];
    if (nobs < 1)
    {
        printf("%s: Error no observations\n", fcnm);
        return -1;
    }
    // Get the max number of lags
    maxlag = 0;
    luseLag = false;
    if (lags != NULL)
    {
        luseLag = true;
        maxlag = lags[array_absArgmax32i(nobs, lags, &ierr)];
    }
    if (maxlag == 0){luseLag = false;}
    nptsPad = nptsAll + nobs*maxlag;
    G = memory_calloc64f(nptsPad*LDG);
    obs = memory_calloc64f(nptsPad);
    est = memory_calloc64f(nptsPad);
    wts = memory_calloc64f(nptsPad);
    // Insert the Green's functions and data into the moment tensor matrix
    if (!luseLag)
    {
        for (k=0; k<nobs; k++)
        {
            i1 = signalPtr[k];
            i2 = signalPtr[k+1];
            for (i=i1; i<i2; i++)
            {
                G[i*LDG+0] = Gxx[i];
                G[i*LDG+1] = Gyy[i];
                G[i*LDG+2] = Gzz[i];
                G[i*LDG+3] = Gxy[i];
                G[i*LDG+4] = Gxz[i];
                G[i*LDG+5] = Gyz[i];
                obs[i] = d[i];
            }
        }
    }
    else
    {
        // Lag-shifted insertion is not implemented here
    }
    for (imt=0; imt<nmtSolve; imt++)
    {
        // Extract the moment tensor
        jmt = mtPtr[imt];
        for (i=0; i<6; i++)
        {
            m6[i] = mts[ldm*jmt+i];
        }
        // Remove the scalar moment
        compearth_CMT2m0(1, 1, m6, &xmag);
        cblas_dscal(6, 1.0/xmag, m6, 1);
        // Create the linear model [G*m]*M0 = [est]*{M0} = {obs}
        cblas_dgemv(CblasRowMajor, CblasNoTrans,
                    nptsAll, 6, one, G, LDG,
                    m6, 1, zero, est, 1);
        // Solve for the magnitude in the L1 norm
        ierr = linalg_irls64f_work(nptsAll, 1, maxit, p, eps, tol,
                                   est, obs, &xmag, wts);
        mags[imt] = xmag;
        double xmagMw;
        compearth_m02mw(1, CE_KANAMORI_1978, &xmag, &xmagMw);
        printf("%f\n", xmagMw);
    }
    memory_free64f(&wts);
    memory_free64f(&G);
    memory_free64f(&obs);
    memory_free64f(&est);
    return 0;
}

int parmt_computeL1StationMagnitude64f(const int ldm, const int nmt,
                                       const int npts, const int nblock,
                                       const double *__restrict__ Gxx,
                                       const double *__restrict__ Gyy,
                                       const double *__restrict__ Gzz,
                                       const double *__restrict__ Gxy,
                                       const double *__restrict__ Gxz,
                                       const double *__restrict__ Gyz,
                                       const double *__restrict__ mts,
                                       const double *__restrict__ d,
                                       double *__restrict__ mags)
{
    const char *fcnm = "parmt_computeL1StationMagnitude64f\0";
    double *G, *M, *R, *Dmat, *res, *resSort, xmag, xopt;
    const double one = 1.0;
    const double zero = 0.0;
    const double negOne = -one;
    int *perm;
    bool lsorted;
    int i, ic, idx, ierr, imt, jdx, jmt, mblock, Mrows, Ncols;
    const int Kcols = 6; // Number of columns of G
    Mrows = npts;
    mblock = nblock; // + computePadding6f(npts);
    G = memory_calloc64f(LDG*npts);
    R = memory_calloc64f(mblock*npts);
    M = memory_calloc64f(mblock*8);
    Dmat = memory_calloc64f(mblock*npts);
    res = memory_calloc64f(npts);
    resSort = memory_calloc64f(npts);
    perm = memory_calloc32i(npts);
    for (i=0; i<npts; i++){perm[i] = i;}
    // Set the observations
    for (i=0; i<npts; i++)
    {
        for (ic=0; ic<nblock; ic++)
        {
            Dmat[mblock*i+ic] = d[i];
        }
    }
#ifdef __INTEL_COMPILER
    __assume_aligned(G, 64);
#endif
    ierr = setG64f(npts, Gxx, Gyy, Gzz, Gxy, Gxz, Gyz, G);
    if (ierr != 0)
    {
        printf("%s: Failed to set G\n", fcnm);
        return -1;
    }
    int nsorted = 0;
    for (jmt=0; jmt<nmt; jmt=jmt+nblock)
    {
        Ncols = MIN(nblock, nmt - jmt); // Number of columns of M
        // Set the observations
        cblas_dcopy(npts*mblock, Dmat, 1, R, 1);
        for (i=0; i<6; i++)
        {
            for (ic=0; ic<Ncols; ic++)
            {
                imt = jmt + ic;
                idx = ldm*imt + i;
                jdx = mblock*i;
                M[jdx+ic] = mts[idx]; ///xmag;
            }
        }
        // Compute R = GM - D = GM - R
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    Mrows, Ncols, Kcols, one, G, LDG,
                    M, mblock, negOne, R, mblock);
        //lsorted = false;
        //for (i=0; i<npts; i++){perm[i] = i;}
        // Sort the columns of the residual matrix
        for (ic=0; ic<Ncols; ic++)
        {
            imt = jmt + ic;
            // Compute the absolute value of the residual
            for (i=0; i<npts; i++)
            {
                res[i] = fabs(R[ic+i*mblock]);
            }
            //cblas_dcopy(npts, &R[ic], mblock, res, 1);
            // Apply the old permutation
            sorting_applyPermutation64f_work(npts, perm, res, resSort);
            lsorted = sorting_issorted64f(npts, resSort, SORT_ASCENDING, &ierr);
            if (!lsorted)
            {
                sorting_argsort64f_work(npts, res, SORT_ASCENDING, perm);
                sorting_applyPermutation64f_work(npts, perm, res, resSort);
            }
            else
            {
                nsorted = nsorted + 1;
            }
            // Compute the median
            xopt = median_sorted_array(npts, resSort);
            //compearth_CMT2mw(1, 1, &mts[8*imt], &xmag);
            compearth_CMT2m0(1, 1, &mts[8*imt], &xmag);
            xopt = xopt*xmag;
            compearth_m02mw(1, CE_KANAMORI_1978, &xopt, &mags[imt]);
        }
    }
    printf("%d %d\n", nsorted, nmt);
    memory_free32i(&perm);
    memory_free64f(&G);
    memory_free64f(&R);
    memory_free64f(&M);
    memory_free64f(&Dmat);
    memory_free64f(&res);
    memory_free64f(&resSort);
    return 0;
}

static double median_sorted_array(int n, const double *__restrict x)
{
    double xmed;
    int n2;
    const double half = 0.5;
    n2 = n/2;
    // Even -> average the middle two elements
    if (n % 2 == 0)
    {
        xmed = half*(x[n2-1] + x[n2]);
    }
    // Odd -> the median is the middle element
    else
    {
        xmed = x[n2];
    }
    return xmed;
}

static int setG64f(const int npts,
                   const double *__restrict__ Gxx,
                   const double *__restrict__ Gyy,
                   const double *__restrict__ Gzz,
                   const double *__restrict__ Gxy,
                   const double *__restrict__ Gxz,
                   const double *__restrict__ Gyz,
                   double *__restrict__ G)
{
    const char *fcnm = "setG64f\0";
    int i;
    bool lalign;
    if (Gxx == NULL || Gyy == NULL || Gzz == NULL ||
        Gxy == NULL || Gxz == NULL || Gyz == NULL ||
        G == NULL || npts < 1)
    {
        if (Gxx == NULL){printf("%s: Error Gxx is NULL\n", fcnm);}
        if (Gyy == NULL){printf("%s: Error Gyy is NULL\n", fcnm);}
        if (Gzz == NULL){printf("%s: Error Gzz is NULL\n", fcnm);}
        if (Gxy == NULL){printf("%s: Error Gxy is NULL\n", fcnm);}
        if (Gxz == NULL){printf("%s: Error Gxz is NULL\n", fcnm);}
        if (Gyz == NULL){printf("%s: Error Gyz is NULL\n", fcnm);}
        if (npts < 1)
        {
            printf("%s: No points in Green's functions\n", fcnm);
        }
        return -1;
    }
    lalign = true;
    if (memory_isAligned(Gxx, 64) != 1 || memory_isAligned(Gyy, 64) != 1 ||
        memory_isAligned(Gzz, 64) != 1 || memory_isAligned(Gxy, 64) != 1 ||
        memory_isAligned(Gxz, 64) != 1 || memory_isAligned(Gyz, 64) != 1 ||
        memory_isAligned(G, 64) != 1)
    {
        lalign = false;
    }
    if (lalign)
    {
        #pragma omp simd aligned(G, Gxx, Gyy, Gzz, Gxy, Gxz, Gyz: 64)
        for (i=0; i<npts; i++)
        {
            G[LDG*i+0] = Gxx[i];
            G[LDG*i+1] = Gyy[i];
            G[LDG*i+2] = Gzz[i];
            G[LDG*i+3] = Gxy[i];
            G[LDG*i+4] = Gxz[i];
            G[LDG*i+5] = Gyz[i];
        }
    }
    else
    {
        #pragma omp simd
        for (i=0; i<npts; i++)
        {
            G[LDG*i+0] = Gxx[i];
            G[LDG*i+1] = Gyy[i];
            G[LDG*i+2] = Gzz[i];
            G[LDG*i+3] = Gxy[i];
            G[LDG*i+4] = Gxz[i];
            G[LDG*i+5] = Gyz[i];
        }
    }
    return 0;
}
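`linalg_irls64f_work` is called above with p = 1 to fit the scalar moment M0 in the L1 sense. A minimal sketch of the IRLS idea for this one-column case follows; the weight formula and stopping rule here are illustrative assumptions, not the ISCL library's internals.

#include <math.h>

/* Fit obs ~ m*est in the L1 sense by repeatedly solving a
 * weighted least-squares problem with weights w_i = 1/|r_i|. */
static double irls_l1_scalar(int n, const double *est, const double *obs,
                             int maxit, double eps, double tol)
{
    double m = 0.0;
    for (int it = 0; it < maxit; it++)
    {
        double num = 0.0, den = 0.0;
        for (int i = 0; i < n; i++)
        {
            double r = fabs(obs[i] - m*est[i]);
            double w = 1.0/fmax(r, eps);   /* p = 1 => w_i = 1/|r_i|, floored */
            num += w*est[i]*obs[i];
            den += w*est[i]*est[i];
        }
        double mNew = (den > 0.0) ? num/den : m;
        if (fabs(mNew - m) <= tol*fmax(1.0, fabs(m))) { m = mNew; break; }
        m = mNew;
    }
    return m;
}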
MaskedInnerSpGEMM.h
#ifndef MASKED_SPGEMM_MASKED_INNER_SPGEMM_H #define MASKED_SPGEMM_MASKED_INNER_SPGEMM_H #include "../../CSR.h" #include "../../CSC.h" #include "../common.h" #include "../scan.h" /* * Manually inlined numeric and symbolic funcs */ template<class IT, class NT, class MultiplyOperation, class AddOperation> void MaskedSpGEMM1pInnerProduct(const CSR<IT, NT> &A, const CSC<IT, NT> &B, CSR<IT, NT> &C, const CSR<IT, NT> &M, MultiplyOperation multop, AddOperation addop, unsigned numThreads = 0) { // Calculate number of threads and init C setNumThreads(numThreads); verifyInputs(A, B, C, M); // Estimate work IT *workPerRow = my_malloc<IT>(M.rows, false); IT work = calculateWork(A, B, M, workPerRow, numThreads); // Calculate cumulative work IT *cumulativeWork = my_malloc<IT>(M.rows, false); exclusiveScan(workPerRow, M.rows, cumulativeWork, numThreads); // Allocate memory for row sizes IT *rowNvals = my_malloc<IT>(M.rows, false); IT *threadsNvals = my_malloc<IT>(numThreads, false); // Allocate temporary memory for C's column IDs and values IT *colIds = my_malloc<IT>(M.nnz, false); NT *values = my_malloc<NT>(M.nnz, false); #pragma omp parallel num_threads(numThreads) { int thisThread = omp_get_thread_num(); // Distribute work auto[rowBeginIdx, rowEndIdx] = distributeWork(work, cumulativeWork, A.rows, numThreads, thisThread); // Get arrays for local colIDs and values IT *const colIdsLocal = colIds + M.rowptr[rowBeginIdx]; NT *const valuesLocal = values + M.rowptr[rowBeginIdx]; IT *currColId = colIdsLocal; NT *currValue = valuesLocal; // Numeric phase for (IT row = rowBeginIdx; row < rowEndIdx; ++row) { if (workPerRow[row] == 0) { rowNvals[row] = 0; } auto rowColIdBegin = currColId; for (IT j = M.rowptr[row]; j < M.rowptr[row + 1]; ++j) { IT itAIdx = A.rowptr[row]; const IT rowAEnd = A.rowptr[row + 1]; if (itAIdx == rowAEnd) { continue; } IT itBIdx = B.colptr[M.colids[j]]; const IT colBEnd = B.colptr[M.colids[j] + 1]; if (itBIdx == colBEnd) { continue; } bool active = false; NT value; while (true) { if (A.colids[itAIdx] < B.rowids[itBIdx]) { if (++itAIdx == rowAEnd) { break; } } else if (A.colids[itAIdx] > B.rowids[itBIdx]) { if (++itBIdx == colBEnd) { break; } } else { if (active) { value = addop(value, multop(A.values[itAIdx], B.values[itBIdx])); } else { active = true; value = multop(A.values[itAIdx], B.values[itBIdx]); } if (++itAIdx == rowAEnd) { break; } if (++itBIdx == colBEnd) { break; } } } if (active) { *(currColId++) = M.colids[j]; *(currValue++) = value; } } rowNvals[row] = currColId - rowColIdBegin; } threadsNvals[thisThread] = currColId - colIdsLocal; #pragma omp barrier #pragma omp master { initC(A, B, C, threadsNvals, numThreads); } #pragma omp barrier setRowOffsets(C, threadsNvals, rowBeginIdx, rowEndIdx, rowNvals, numThreads, thisThread); copyValuesToC(C, rowBeginIdx, colIdsLocal, valuesLocal, threadsNvals[thisThread]); } my_free(workPerRow, cumulativeWork, rowNvals, threadsNvals, colIds, values); } template<class IT, class NT, class MultiplyOperation, class AddOperation> void MaskedSpGEMM2pInnerProduct(const CSR<IT, NT> &A, const CSC<IT, NT> &B, CSR<IT, NT> &C, const CSR<IT, NT> &M, MultiplyOperation multop, AddOperation addop, unsigned numThreads = 0) { // Calculate number of threads and init C setNumThreads(numThreads); verifyInputs(A, B, C, M); // Estimate work IT *workPerRow = my_malloc<IT>(M.rows, false); IT work = calculateWork(A, B, M, workPerRow, numThreads); // Calculate cumulative work IT *cumulativeWork = my_malloc<IT>(M.rows, false); exclusiveScan(workPerRow, M.rows, 
cumulativeWork, numThreads); // Allocate memory for row sizes IT *rowNvals = my_malloc<IT>(M.rows, false); IT *threadsNvals = my_malloc<IT>(numThreads, false); #pragma omp parallel num_threads(numThreads) { int thisThread = omp_get_thread_num(); // Distribute work auto[rowBeginIdx, rowEndIdx] = distributeWork(work, cumulativeWork, A.rows, numThreads, thisThread); // Symbolic phase IT nvals = 0; for (IT row = rowBeginIdx; row < rowEndIdx; ++row) { if (workPerRow[row] == 0) { rowNvals[row] = 0; continue; } IT currRowNvals = 0; for (IT j = M.rowptr[row]; j < M.rowptr[row + 1]; ++j) { IT itAIdx = A.rowptr[row]; const IT rowAEnd = A.rowptr[row + 1]; if (itAIdx == rowAEnd) { continue; } IT itBIdx = B.colptr[M.colids[j]]; const IT colBEnd = B.colptr[M.colids[j] + 1]; if (itBIdx == colBEnd) { continue; } while (true) { if (A.colids[itAIdx] < B.rowids[itBIdx]) { if (++itAIdx == rowAEnd) { break; } } else if (A.colids[itAIdx] > B.rowids[itBIdx]) { if (++itBIdx == colBEnd) { break; } } else { currRowNvals++; break; } } } rowNvals[row] = currRowNvals; nvals += rowNvals[row]; } threadsNvals[thisThread] = nvals; // init C #pragma omp barrier #pragma omp master { initC(A, B, C, threadsNvals, numThreads); } #pragma omp barrier setRowOffsets(C, threadsNvals, rowBeginIdx, rowEndIdx, rowNvals, numThreads, thisThread); // Numeric phase IT *currColId = &C.colids[C.rowptr[rowBeginIdx]]; NT *currValue = &C.values[C.rowptr[rowBeginIdx]]; for (IT row = rowBeginIdx; row < rowEndIdx; ++row) { if (workPerRow[row] == 0) { rowNvals[row] = 0; continue; } auto rowColIdBegin = currColId; for (IT j = M.rowptr[row]; j < M.rowptr[row + 1]; ++j) { IT itAIdx = A.rowptr[row]; const IT rowAEnd = A.rowptr[row + 1]; if (itAIdx == rowAEnd) { continue; } IT itBIdx = B.colptr[M.colids[j]]; const IT colBEnd = B.colptr[M.colids[j] + 1]; if (itBIdx == colBEnd) { continue; } bool active = false; NT value; while (true) { if (A.colids[itAIdx] < B.rowids[itBIdx]) { if (++itAIdx == rowAEnd) { break; } } else if (A.colids[itAIdx] > B.rowids[itBIdx]) { if (++itBIdx == colBEnd) { break; } } else { if (active) { value = addop(value, multop(A.values[itAIdx], B.values[itBIdx])); } else { active = true; value = multop(A.values[itAIdx], B.values[itBIdx]); } if (++itAIdx == rowAEnd) { break; } if (++itBIdx == colBEnd) { break; } } } if (active) { *(currColId++) = M.colids[j]; *(currValue++) = value; } } rowNvals[row] = currColId - rowColIdBegin; } } my_free(workPerRow, cumulativeWork, rowNvals, threadsNvals); } #endif //MASKED_SPGEMM_MASKED_INNER_SPGEMM_H
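Both kernels above repeat the same core step at every surviving mask position: a two-pointer merge of a sorted CSR row of A against a sorted CSC column of B. A standalone sketch of that inner kernel (the name and signature are illustrative, not part of the library):

#include <cstddef>

// Sparse dot product of a sorted index/value range from A's row against a
// sorted range from B's column. Returns false if no index coincides, i.e.
// the masked output entry stays structurally zero.
template<class IT, class NT, class MultiplyOperation, class AddOperation>
bool maskedDot(const IT *aCols, const NT *aVals, IT aBeg, IT aEnd,
               const IT *bRows, const NT *bVals, IT bBeg, IT bEnd,
               MultiplyOperation multop, AddOperation addop, NT &out)
{
    bool active = false;
    while (aBeg != aEnd && bBeg != bEnd) {
        if (aCols[aBeg] < bRows[bBeg]) {
            ++aBeg;                                 // advance the smaller index
        } else if (aCols[aBeg] > bRows[bBeg]) {
            ++bBeg;
        } else {
            NT prod = multop(aVals[aBeg], bVals[bBeg]);
            out = active ? addop(out, prod) : prod; // first hit initializes out
            active = true;
            ++aBeg;
            ++bBeg;
        }
    }
    return active;
}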
global_allocate.c
#include <omp.h>
#include <stdint.h>
#include <stdio.h>

#pragma omp declare target
char *global_allocate(uint32_t bufsz);
int global_free(char *ptr);
#pragma omp end declare target

int main() {
  // Succeeds
  #pragma omp target device(0)
  {
    char *data = (char *)global_allocate(16);
    data[0] = 10;
    data[1] = 20 * data[0];
    global_free(data);
  }

  if (omp_get_num_devices() > 1) {
    // Crashes with a GPU memory error:
    // global_allocate assumes a single-GPU system
    #pragma omp target device(1)
    {
      char *data = (char *)global_allocate(16);
      data[0] = 10;
      data[1] = 20 * data[0];
      global_free(data);
    }
  }

  printf("Success\n");
  return 0;
}
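The crash the second region documents comes from `global_allocate` baking in one device. For comparison, a sketch of the same access pattern using the standard OpenMP device allocator, which takes an explicit device number (`global_allocate`/`global_free` are this demo's own helpers; `omp_target_alloc`/`omp_target_free` and `is_device_ptr` are standard OpenMP 4.5+):

#include <omp.h>
#include <stdio.h>

int main(void) {
  int dev = omp_get_default_device();

  /* Allocate directly on the chosen device */
  char *data = (char *)omp_target_alloc(16, dev);
  if (data == NULL) {
    printf("device allocation failed\n");
    return 1;
  }

  /* The raw device pointer must be passed through is_device_ptr */
  #pragma omp target device(dev) is_device_ptr(data)
  {
    data[0] = 10;
    data[1] = 20 * data[0];
  }

  omp_target_free(data, dev);
  printf("Success\n");
  return 0;
}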
GB_unaryop__ainv_uint16_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_uint16_fp64
// op(A') function: GB_tran__ainv_uint16_fp64

// C type:   uint16_t
// A type:   double
// cast:     uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop:  cij = -aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint16_fp64
(
    uint16_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
shape.h
/* * shape.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef SHAPE_H_ #define SHAPE_H_ #include <cstring> #include <cstdio> #include "../dll.h" #include "../nd4jmalloc.h" #include "../templatemath.h" #include "../helpers/logger.h" #include "../pointercast.h" #include "../cnpy/cnpy.h" #include <op_boilerplate.h> #define MAX_DIMENSION 0x7fffffff #define MAX_NUM_THREADS 1024 #define MAX_RANK 32 #define MAX_COORD 3 #define PREALLOC_SIZE 33554432 #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #include <helpers/sharedmem.h> #endif #ifdef __CUDACC__ #define INLINEDEF inline #else #define INLINEDEF inline #endif #include "../pairwise_util.h" namespace shape { /** * Shape information approximating * the information on an ndarray */ struct ND4J_EXPORT ShapeInformation { #ifdef __CUDACC__ __host__ __device__ #endif ShapeInformation(int *shape_ = nullptr, int *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0) : shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_) {} int *shape; int *stride; char order; int rank; int offset; int elementWiseStride; }; /** * Indexing information * for bounds checking */ struct ND4J_EXPORT CurrentIndexing { int numElementsPerThread; int blockStartingIndex; int startingThreadIndex; int endingThreadIndex; }; #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool shapeEquals(int shape1Rank,int *shape1,int shape2Rank,int *shape2); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool shapeEquals(int *shapeInfo1,int *shapeInfo2); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool strideEquals(int shape1Rank,int *shape1,int shape2Rank,int *shape2); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool strideEquals(int *shapeInfo1,int *shapeInfo2); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool strideEquals(int *stride1,int rank1,int *stride2,int rank2); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool equalsSoft(int *shapeA, int *shapeB); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool equalsStrict(int *shapeA, int *shapeB); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int sizeAt(int *shape, int dim); template <typename T> #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void fill(T* buffer, T value, Nd4jIndex length); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void traceNew(int id); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int tadIndexForLinear(int linearIndex, int tadLength); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int tadLength(int *shapeInfo, int *dimension, int dimensionLength); #ifdef __CUDACC__ __host__ #endif ND4J_EXPORT bool canReshape(const int oldRank, int* oldShape, const int newRank, int* newShape, bool isFOrder); #ifdef __CUDACC__ __host__ #endif ND4J_EXPORT bool reshapeCF(const int oldRank, int* oldShape, const int newRank, int* newShape, bool isFOrder, int* target); /** * Get the shape info buffer * for the given rank and shape. */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *shapeBuffer(int rank, int *shape); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *shapeBuffer(int rank, int *shape, int *buffer); /** * Get the shape info buffer * for the given rank and shape. 
*/ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *shapeBufferFortran(int rank, int *shape); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *shapeBufferFortran(int rank, int *shape, int *output); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void doPermuteShapeBuffer(int *shapeBuffer,int *rearrange, int *tmpBuffer); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void doPermuteShapeBuffer(int rank,int *shapeBuffer,int *rearrange, int *tmpBuffer); #ifdef __CUDACC__ template <typename T> __device__ ND4J_EXPORT int *cuMalloc(int *buffer, long size, UnifiedSharedMemory *manager); __device__ ND4J_EXPORT int *cuMalloc(int *buffer, long size); #endif /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int * calcStridesFortran(int *shape, int rank); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int * calcStridesFortran(int *shape, int rank, int* ret); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int* calcStrides(int *shape, int rank); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int* calcStrides(int *shape, int rank, int* ret); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void updateStrides(int *shape, const char order); // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool isDimPermuted(const int* dimensions, const int dimSize); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int* calcStridesFortran(int *shape, int rank, int startNum); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int* calcStridesFortran(int *shape, int rank, int startNum, int* ret); /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int* calcStrides(int *shape, int rank, int startNum); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int* calcStrides(int *shape, int rank, int startNum, int* ret); /** * @param toCopy the shape to copy * @return a copy of the original struct */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT ShapeInformation *shapeCopy( ShapeInformation *toCopy); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool strideDescendingCAscendingF(int *shapeBuffer); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return -1 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int computeElementWiseStride(int rank, int *shape, int *stride, int isFOrder); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return -1 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int computeElementWiseStride(int rank, int *shape, int *stride, int isFOrder, int *dimension, int dimensionLength); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *shapeInfoOnlyShapeAndStride(int *shapeInfo, int *dimension, int dimensionLength,bool reverseCopyStride); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *shapeInfoOnlyShapeAndStride(int *shapeInfo, int *dimension, int dimensionLength,bool reverseCopyStride, int *buffer); /** * * @param length * @param shape * @param rearrange * @return */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *doPermuteSwap(int length, int *shape, int *rearrange); /** * In place permute swap * @param length * @param shape * @param rearrange */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void doPermuteSwap(int length, int **shape, int *rearrange); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *permuteShapeBuffer(int *shapeBuffer,int *rearrange); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void permuteShapeBufferInPlace(int *shapeBuffer,int *rearrange,int *out); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void doPermuteShapeBuffer(int *shapeBuffer,int *rearrange); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void doPermuteShapeBuffer(int rank,int *shapeBuffer,int *rearrange); /** * Rearrange the permute indexes * according to which dimensions are specified. * * For example, dimension is implicitly: * 0,1,2 * * If you want to do a reduce along dimensions 0 and 1, * you need to permute the indexes to be: * 2,0,1 * * which will give us the ability to ierate along an element * wise stride. 
*/ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *createPermuteIndexes(int originalRank,int *dimension,int dimensionLength); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *computeResultShape(int *originalShapeBuffer,int *dimension,int dimensionLength); /** * This method does inplace transpose of given shapeBuffer * * @param shapeBuffer */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void transposeInplace(int *shapeBuffer); /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT char getOrder(int length, int *shape, int *stride, int elementStride); /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int checkArrangeArray(int *arr, int arrLength, int shapeLength); /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void permute(ShapeInformation **info, int *rearrange, int rank); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of cthe shape */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int isVector(int *shape, int rank); /** * When 1 dimension is the whole length of the * array */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int oneDimEqualToLength(int *shape, int rank); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int oneDimEqualToLength(int *shapeInfo); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int isVector(int *shapeInfo); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool isLikeVector(int *shapeInfo); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool isRowVector(int *shapeInfo); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT bool isColumnVector(int *shapeInfo); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int isMatrix(int *shape, int rank); #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int isMatrix(int *shapeInfo); /** * Returns the shape portion of an information * buffer */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *shapeOf(int *buffer); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *copyOf(int length, int *toCopy); #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int *copyOf(int length, int *toCopy, int *ret); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT void copyTo(int length, int *from, int *to); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. 
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void copyTo(int length, int *from, int *to, int *indexes);

/**
 * Permute the given strides
 * in the given rearrange order
 * @param toPermute the buffer to permute
 * @param shapeRank the length of the buffer to permute
 * @param rearrange the rearrange order (must be 0 based indexes
 * and all must be filled in)
 * @return the rearranged array
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *permutedStrides(int *toPermute, int shapeRank, int *rearrange);

/**
 * Return the slice (shape + 1 in pointer arithmetic)
 * @param shape the shape to take the slice of
 * @return the shape array without the first entry
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *slice(int *shape);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int slices(int *shapeBuffer);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *sliceOfShapeBuffer(int sliceIdx, int *shapeBuffer);

/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 4
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int shapeInfoLength(int rank);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int shapeInfoLength(int* shapeInfo);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT size_t shapeInfoByteLength(int rank);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT size_t shapeInfoByteLength(int* shapeInfo);

/**
 * Returns the rank portion of
 * an information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int rank(int *buffer);

/**
 * Converts a raw int buffer of the layout:
 * rank
 * shape
 * stride
 * offset
 * elementWiseStride
 *
 * where shape and stride are both straight int pointers
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT ShapeInformation *infoFromBuffer(int *buffer);

/**
 * Returns the stride portion of an information
 * buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *stride(int *buffer);

/**
 * Compute the length of the given shape
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT Nd4jIndex length(int *shapeInfo);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT Nd4jIndex length(std::initializer_list<int>& shape);

/**
 * Returns the offset portion of an information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int offset(int *buffer);

/**
 * Returns the ordering
 * for this shape information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT char order(int *buffer);

/**
 * Returns the element wise stride for this information
 * buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int elementWiseStride(int *buffer);

/**
 * Returns the element wise stride for this information
 * buffer
 * relative to a dimension and ordering for a reduction index
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int reductionIndexElementWiseStride(int *buffer, int *dimension, int dimensionLength);

/**
 * Returns whether
 * the given shape info buffer
 * represents a scalar shape
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int isScalar(int *info);

/**
 * Returns whether
 * the given shape information
 * represents a scalar
 * shape or not
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int isScalar(volatile ShapeInformation *info);

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the omitted item
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void removeIndex(int *data, int *indexes, int dataLength, int indexesLength, int *out);

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the omitted item
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *removeIndex(int *data, int *indexes, int dataLength, int indexesLength);

/**
 * Iterate over a given set of indexes;
 * the begin and end indexes are 0 based.
 * A padding of 1 is automatically assumed for the end,
 * so if you want to iterate over 0 to 4
 * it will go to 4 rather than 3.
 *
 * indexes should be the indexes to exclude;
 * indexesLength should be the length of indexes
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* everyIndexBut(int *indexes, int indexesLength, int begin, int end);

/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);

/**
 * Returns a shape buffer that
 * forces the given length to be 2.
 * @param shape the shape to modify
 * @param dimension the dimension (row or column)
 * for the shape to be returned as
 * @return the new shape
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* ensureVectorShape(int *shape);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* createScalarShapeInfo();

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* createScalarShapeInfo(int *ret);

/**
 * Generate an int buffer
 * up to the given length
 * at the specified increment
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *range(int from, int to, int increment);

/**
 * Range between from and to with an
 * increment of 1
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *range(int from, int to);

/**
 * Keep the given indexes
 * in the data
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *keep(volatile int *data, int *index, int indexLength, int dataLength);

/**
 * Generate a reverse copy of the data
 * @param data
 * @param length
 * @return
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *reverseCopy(int *data, int length);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void reverseCopyTo(int *from, int *to, int length);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void reverseCopyTo(int *from, int *to, int *indexes, int length);

/**
 *
 * @param arr1
 * @param arr1Length
 * @param arr2
 * @param arr2Length
 * @return
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *concat(int *arr1, int arr1Length, int *arr2, int arr2Length);

/**
 *
 * @param numArrays
 * @param numTotalElements
 * @param arr
 * @param lengths
 * @return
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *concat(int numArrays, int numTotalElements, int **arr, int *lengths);

/**
 * Get the length per slice of the
 * given shape and the dimension
 * @param rank the rank of the shape
 * @param shape the shape to get
 * the length per slice for
 * @param dimension the dimension to
 * get the length per slice for
 * @param dimensionLength the length of the dimension array
 * @return
the length per slice of the given shape * along the given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int lengthPerSlice(int rank, int *shape, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int sliceOffsetForTensor(int rank, int index, int *shape, int *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2); /** * Computes the tensor along dimension * offset * @param index the index to get the offset for the tad for * @param rank the rank of the shapes and strides * @param info the shape information to use for tad * @param dimension the dimensions to use for computing the tensor along dimensions */ //#ifdef __CUDACC__ // __host__ __device__ //#endif // // ND4J_EXPORT int offset(int index, // int rank, // shape::ShapeInformation *info, // int *dimension, // int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int tensorsAlongDimension(int rank, volatile int length, volatile int *shape, int *dimension, int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength); /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int tadForBlockIndex(int blockSize, int blockIdx, int i); /** * Computes the number of tads per block * */ #ifdef __CUDACC__ __host__ __device__ #endif ND4J_EXPORT int tadsPerBlock(int blockSize, int tads); //#ifdef __CUDACC__ // __host__ __device__ //#endif // // ND4J_EXPORT int *tadShapeInfo(int index, int *xShapeInfo, int *dimension, // int dimensionLength); /** * Returns a shape buffer * for the shape information metadata. 
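 * The produced buffer has the layout
 * [rank, shape[0..rank-1], stride[0..rank-1], offset, elementWiseStride, order],
 * which is why its total length is rank * 2 + 4.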
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *toShapeBuffer(ShapeInformation *info);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *toShapeBuffer(ShapeInformation *info, int* ret);

/**
 * Returns the number of elements per thread
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int numElementsPerThread(int N);

/**
 * Returns the block starting index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int blockStartingIndex(int N);

/**
 * Returns the thread starting index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int threadStartingIndex(int N, int stride, int offset);

/**
 * Returns the thread ending index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int threadEndingIndex(int N, int stride, int offset);

/**
 * Returns indexing information
 * for the current kernel invocation
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    CurrentIndexing *currentIndex(int N, int offset, int stride);

/** Given a linear index, element wise stride
 * and the length of each tad
 * map a linear index to a tad
 * @param i the index to map
 * @param elementWiseStride the element wise stride for the tads
 * @param numElementsPerTad the number of elements
 * per tad
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int tadIndex(int i, int elementWiseStride, int numElementsPerTad);

/**
 * Map a tad to a
 * reduction index.
 * @param tadIndexForOriginal the original tad index for the
 * split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
 * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
 * @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
                                         int tadsForOriginal);

/**
 * Computes the number of tads
 * per reduce index for the
 * reduction tad.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);

/**
 * Maps a linear index to a reduction index
 * @param i the linear index to map
 * @param elementWiseStride the element wise stride
 * for the multiple problem
 * @param tadNum the number of tads for the shrunken problem
 * @param originalTadNum the tad number for the reduced version of the problem
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
                                            int tadNum, int originalTadNum);

/**
 * Returns the prod of the data
 * up to the given length
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int prod(int *data, int length);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT Nd4jIndex prodLong(int *data, int length);

/**
 * Returns the rear most left over item not present in
 * the dimension array. This assumes that the dimension array is sorted.
 *
 * For example, given a dimension array of:
 * 0,2
 *
 * and
 *
 * 12,4,2,1 in data
 *
 * You end up with 1 (data[3])
 * since the first item won't match
 * the last item of the dimension array
 */
//#ifdef __CUDACC__
//    __host__ __device__
//#endif
//    int rearMostLeftOverItem(int *data, int length, int *dimension, int dimensionLength);

/**
 * Get an offset for retrieval
 * from a data buffer
 * based on the given
 * shape stride and given indices
 * @param baseOffset the offset to start from
 * @param shape the shape of the array
 * @param stride the stride of the array
 * @param indices the indices to iterate over
 * @return the offset for the given indices
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT Nd4jIndex getOffset(Nd4jIndex baseOffset, int *shape, int *stride, int *indices, int rank);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* createShapeInfo(int *shape, int *stride, int rank);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* createShapeInfo(int *shape, int *stride, int rank, int *buffer);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* ind2sub(int rank, int *shape, int index, int numIndices);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *ind2sub(int rank, int *shape, int index);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void ind2sub(int rank, int *shape, int index, int numIndices, int *out);

/**
 * Convert a linear index to
 * the equivalent nd index.
 * Infers the number of indices from the specified shape.
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void ind2sub(int rank, int *shape, int index, int *out);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* ind2subC(int rank, int *shape, int index);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int* ind2subC(int rank, int *shape, int index, int numIndices);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void ind2subC(int rank, int *shape, int index, int numIndices, int *out);

/**
 * Convert a linear index to
 * the equivalent nd index.
 * Infers the number of indices from the specified shape.
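 * For example, for the C-ordered shape {2,3} (6 indices in total),
 * linear index 4 maps to the indexes {1,1}.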
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void ind2subC(int rank, int *shape, int index, int *out);

/**
 * Convert the given index (such as 1,1)
 * to a linear index
 * @param shape the shape of the indexes to convert
 * @param indices the index to convert
 * @return the linear index given the shape
 * and indices
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int sub2Ind(int rank, int *shape, int *indices);

/**
 * Compute the real linear indices for the given shape and stride
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT Nd4jIndex *computeIndices(int rank, int *shape, int *stride);

/**
 * Compute the real linear indices for the
 * given shape buffer. Shape, stride and rank are derived
 * from the buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT Nd4jIndex *computeIndices(int *shapeBuffer);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void ind2subOrder(int *shapeInfo, int index, int numIndices, int *out);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void ind2subOrder(int *shapeInfo, int index, int *out);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void printShapeInfo(int *shapeInfo);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void printShapeInfoLinear(int *shapeInfo);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void printIntArray(int *arr, int length);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT void printArray(float *arr, int length);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *shapeBufferOfNpy(int rank, unsigned int *shape, bool fortranOrder);

#ifdef __CUDACC__
    __host__ __device__
#endif
    ND4J_EXPORT int *shapeBufferOfNpy(cnpy::NpyArray arr);

//#ifdef __CUDACC__
//    __host__ __device__
//#endif
//    ND4J_EXPORT int *shapeBufferOfNpyBuffer(char *buffer);

#ifdef __CUDACC__
    __host__
#endif
    // this function checks the consistency of the dimensions with the array rank
    // (negative dimensions, too-large dimensions, too many dimensions);
    // it also sorts the input array of dimensions, which is necessary for creating a TAD object
    ND4J_EXPORT void checkDimensions(const int rank, std::vector<int>& dimensions);

#ifdef __CUDACC__
    __host__
#endif
    // returns the absolute index in the min array (a sub-array of max) that corresponds
    // to maxIdx in the max array
    ND4J_EXPORT Nd4jIndex subArrayIndex(const int* maxShapeInfo, const int* minShapeInfo, const int maxIdx);

//END HEADERS


//BEGIN IMPLEMENTATIONS

#ifdef __CUDACC__
    template <typename T>
    __device__ INLINEDEF int *cuMalloc(int *buffer, long size, UnifiedSharedMemory *manager) {
        // if we go for a coord space of 3 dimensions or below - just use shared memory for that
        if (size <= MAX_COORD * 4) {
            int *ptr = new int[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
            return ptr;
        } else {
            // otherwise go to preallocated global memory :(
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            if (tid * size > PREALLOC_SIZE - size) {
                return (int *) malloc(size);
            } else {
                int *ret = buffer;
                ret += (tid * size);
                return ret;
            }
        }
    }
#endif

#ifdef __CUDACC__
/**
 * BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES
 */
    __device__ INLINEDEF int *cuMalloc(int *buffer, long size) {
        int *ret = buffer;
        ret += (threadIdx.x * size);
        return ret;
    }
#endif

/**
 * Length of a tad given
 * the shape information
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int tadLength(int *shapeInfo, int *dimension, int dimensionLength) {
        if(dimensionLength == 1) {
            return shape::shapeOf(shapeInfo)[dimension[0]];
        }
        else {
            int ret = 1;
            for(int i = 0; i < shape::rank(shapeInfo); i++) {
                for(int j = 0; j < dimensionLength; j++) {
                    if(i == dimension[j])
                        ret *= shape::shapeOf(shapeInfo)[dimension[j]];
                }
            }
            return ret;
        }
    }

/**
 * Tad element wise stride:
 * given the inner most dimension (the last of the sorted dimensions)
 * the element wise stride of the tad (disregarding order) is the
 * last dimension's stride.
 *
 * For a given singular dimension this will just be the only entry.
 * For example, given the following c order shape/stride:
 * 2,2,3,2
 * 12,6,2,1
 *
 * The tad element wise stride for 3 will be 1.
 * For zero it will be 12
 *
 * For 2,3 it's 1
 *
 * Note here that the multi dimensional 2,3 case
 * is equivalent to the singular 3 case.
 *
 * Note that this is for the dimension that ultimately
 * ends up removed.
 *
 * Again: this may not preserve the ordering of the tad,
 * but may be used for reductions.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int tadElementWiseStride(int *shapeInfo, int *dimension, int dimensionLength) {
        return reductionIndexElementWiseStride(shapeInfo, dimension, dimensionLength);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF bool shapeEquals(int shape1Rank, int *shape1, int shape2Rank, int *shape2) {
        if(shape1Rank != shape2Rank)
            return false; // ranks are not equal
        for(int i = 0; i < shape1Rank; i++) {
            if(shape1[i] != shape2[i])
                return false;
        }
        return true;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF bool shapeEquals(int *shapeInfo1, int *shapeInfo2) {
        return shape::shapeEquals(shape::rank(shapeInfo1), shape::shapeOf(shapeInfo1), shape::rank(shapeInfo2), shape::shapeOf(shapeInfo2));
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF bool strideEquals(int shape1Rank, int *shape1, int shape2Rank, int *shape2) {
        if(shape1Rank != shape2Rank)
            return false; // ranks are not equal
        for(int i = 0; i < shape1Rank; i++) {
            if(shape1[i] != shape2[i])
                return false;
        }
        return true;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF bool strideEquals(int *shapeInfo1, int *shapeInfo2) {
        return shape::strideEquals(shape::rank(shapeInfo1), shape::stride(shapeInfo1), shape::rank(shapeInfo2), shape::stride(shapeInfo2));
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF bool strideEquals(int *stride1, int rank1, int *stride2, int rank2) {
        if(rank1 != rank2)
            return false;
        for(int i = 0; i < rank1; i++) {
            if(stride1[i] != stride2[i])
                return false;
        }
        return true;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *computeResultShape(int *originalShapeBuffer, int *dimension, int dimensionLength) {
        int *retShape;
        int retShapeLength;
        if(dimensionLength == 1 && dimension[0] == 2147483647) {
            retShape = new int[2];
            retShape[0] = 1;
            retShape[1] = 1;
            retShapeLength = 2;
        }
        else {
            retShape = shape::removeIndex(shape::shapeOf(originalShapeBuffer), dimension,
                                          shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
            retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength;
        }
        //ensure vector is proper shape
        if (retShapeLength == 1) {
            if (dimension[0] == 0) {
                int *newRetShape = new int[2]{1, retShape[0]};
                delete[] retShape;
                retShape = newRetShape;
                retShapeLength = 2;
            }
            else {
                int *newRetShape = new int[2]{retShape[0], 1};
                delete[] retShape;
                retShape = newRetShape;
                retShapeLength = 2;
            }
        }
        else if (retShapeLength == 0) {
            int *newRetShape = new int[2]{1, 1};
            delete[] retShape;
            retShape = newRetShape;
            retShapeLength = 2;
        }

        int *ret = shape::shapeBuffer(retShapeLength, retShape);
        delete[] retShape;

        return ret;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *shapeInfoOnlyShapeAndStride(int *shapeInfo, int *dimension, int dimensionLength, bool reverseCopyStride, int *buffer) {
        int *theShape = shape::shapeOf(shapeInfo);
        int *theStride = shape::stride(shapeInfo);
        int rank = dimensionLength == 1 ? 2 : dimensionLength;
        int *ret = buffer;
        //set the rank
        ret[0] = rank;
        int *retShape = shape::shapeOf(ret);
        int *retStride = shape::stride(ret);
        int len = rank;

        if(dimensionLength == 1) {
            if(shape::isMatrix(theShape, shape::rank(shapeInfo))) {
                int newStride[2] = {theStride[dimension[0]], 1};
                int newShape[2] = {theShape[dimension[0]], 1};
                retShape[0] = newShape[0];
                retShape[1] = newShape[1];
                retStride[0] = newStride[0];
                retStride[1] = newStride[1];
            }
            else {
                int newStride[2] = {1, theStride[dimension[0]]};
                int newShape[2] = {1, theShape[dimension[0]]};
                retShape[0] = newShape[0];
                retShape[1] = newShape[1];
                retStride[0] = newStride[0];
                retStride[1] = newStride[1];
            }
        }
        else {
            int *newIndexes = dimension;
            if(reverseCopyStride)
                shape::reverseCopyTo(theStride, retStride, newIndexes, len);
            else
                shape::copyTo(len, theStride, retStride, newIndexes);
            shape::copyTo(len, theShape, retShape, newIndexes);
        }

        ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo);
        return ret;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *shapeInfoOnlyShapeAndStride(int *shapeInfo, int *dimension, int dimensionLength, bool reverseCopyStride) {
        int rank = dimensionLength == 1 ? 2 : dimensionLength;

        traceNew(4);

        int *ret = new int[shape::shapeInfoLength(rank)];
        return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *createShapeInfo(int *shape, int *stride, int rank) {

        traceNew(5);

        int *ret = new int[shape::shapeInfoLength(rank)];
        return createShapeInfo(shape, stride, rank, ret);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *createShapeInfo(int *shape, int *stride, int rank, int *buffer) {
        buffer[0] = rank;
        int *retShape = shape::shapeOf(buffer);
        int *retStride = shape::stride(buffer);
        for(int i = 0; i < rank; i++) {
            retShape[i] = shape[i];
            retStride[i] = stride[i];
        }
        return buffer;
    }

/**
 * Computes the standard packed array strides for a given shape.
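 * For example, for the shape {2,3,4} and startNum 1 this yields
 * the Fortran-order strides {1,2,6}.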
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int * calcStridesFortran(int *shape, int rank, int startNum) { if (isVector(shape, rank)) { traceNew(5); int *ret = new int[2]; for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; traceNew(6); int *stride = new int[dimensions]; int st = startNum; for (int j = 0; j < rank; j++) { stride[j] = st; st *= shape[j]; } return stride; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int * calcStridesFortran(int *shape, int rank, int startNum, int *ret) { if (isVector(shape, rank)) { for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; int st = startNum; for (int j = 0; j < rank; j++) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int * calcStrides(int *shape, int rank, int startNum) { traceNew(7); int *stride = new int[rank]; if (rank == 1) { stride[0] = 1; return stride; } if (shape::isVector(shape, rank)) { for (int i = 0; i < 2; i++) stride[i] = 1; return stride; } int st = startNum; for (int j = rank - 1; j >= 0; j--) { stride[j] = st; st *= shape[j]; } return stride; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int * calcStrides(int *shape, int rank, int startNum, int* ret) { if (rank == 1) { ret[0] = 1; return ret; } if (shape::isVector(shape, rank)) { for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int st = startNum; for (int j = rank - 1; j >= 0; j--) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int * calcStridesFortran(int *shape, int rank) { return calcStridesFortran(shape, rank, 1); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int * calcStridesFortran(int *shape, int rank, int* ret) { return calcStridesFortran(shape, rank, 1, ret); } /** * Computes the standard packed array strides for a given shape. 
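 * For example, for the shape {2,3,4} this yields
 * the C-order strides {12,4,1}.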
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int* calcStrides(int *shape, int rank) { return calcStrides(shape, rank, 1); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int* calcStrides(int *shape, int rank, int* ret) { return calcStrides(shape, rank, 1, ret); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void updateStrides(int *shape, const char order) { int rank = shape[0]; int doubleRank = 2*rank; if(order == 'c') { if (rank > 0) { shape[doubleRank] = 1; // set unity as last stride for c order for(int j=1; j<rank; ++j) shape[doubleRank-j] = shape[doubleRank-j+1]*shape[rank+1-j]; } } else { if (rank > 0) { shape[rank+1] = 1; // set unity as first stride for f order for(int j=rank+1; j<doubleRank; ++j) shape[j+1] = shape[j]*shape[j-rank]; } } // set last 3 elements in shape shape[doubleRank + 1] = 0; shape[doubleRank + 2] = 1; shape[doubleRank + 3] = (int)order; } // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF bool isDimPermuted(const int* dimensions, const int dimSize ) { for(int i=0; i<dimSize-1; ++i) if(dimensions[i] > dimensions[i+1]) return true; return false; } /** * @param toCopy the shape to copy * @return a copy of the original struct */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF ShapeInformation *shapeCopy( ShapeInformation *toCopy) { ShapeInformation *copy = new ShapeInformation; traceNew(8); copy->shape = new int[toCopy->rank]; memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(int)); traceNew(9); copy->stride = new int[toCopy->rank]; for (int i = 0; i < toCopy->rank; i++) { copy->stride[i] = toCopy->stride[i]; } copy->order = toCopy->order; copy->rank = toCopy->rank; copy->offset = toCopy->offset; copy->elementWiseStride = toCopy->elementWiseStride; return copy; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int computeElementWiseStride(int rank, int *shape, int *stride, int isFOrder) { if(shape::isVector(shape,rank)) { return stride[rank - 1]; } else { int oldnd; int *olddims = shape::copyOf(rank, shape); int *oldstrides = shape::copyOf(rank, stride); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; traceNew(10); int *newStrides = new int[rank]; oldnd = 0; //set the shape to be 1 x length int newShapeRank = 2; int *newShape = new int[newShapeRank]; newShape[0] = 1; newShape[1] = shape::prodLong(shape, rank); /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. 
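 * (For example, a shape {2,1,3} with strides {3,3,1} is treated
 * here as {2,3} with strides {3,1}.)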
*/ for (oi = 0; oi < rank; oi++) { if (shape[oi] != 1) { olddims[oldnd] = shape[oi]; oldstrides[oldnd] = stride[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newShapeRank; ni++) { np *= newShape[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newShapeRank && oi < oldnd) { np = newShape[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShape[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShape[nk]; } } ni = nj++; oi = oj++; } /* * Set strides corresponding to trailing 1s of the new shape. */ if (ni >= 1) { last_stride = newStrides[ni - 1]; } else { last_stride = stride[rank - 1]; } if (isFOrder) { if (ni >= 1) last_stride *= newShape[ni - 1]; } for (nk = ni; nk < newShapeRank; nk++) { newStrides[nk] = last_stride; } //returns the last element of the new stride array int ret = last_stride; delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return ret; } } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int computeElementWiseStride(int rank, int *shape, int *stride, int isFOrder, int *dimension, int dimensionLength) { if(dimensionLength == 1) { return stride[dimension[0]]; } return -1; } /** * Get the shape info buffer * for the given rank and shape. */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *shapeBuffer(int rank, int *shape) { int *stride = shape::calcStrides(shape, rank); traceNew(11); shape::ShapeInformation * shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'c'; shapeInfo->elementWiseStride = elementWiseStride; int *shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; return shapeInfoBuffer; } /** * This is special method, it returns ONLY 2D shapebuffer. 
* * This method is used only for SoftMax */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *shapeBuffer(int rank, int *shape, int *buffer) { int stride[MAX_RANK]; shape::calcStrides(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'c'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, buffer); return buffer; } /** * Get the shape info buffer * for the given rank and shape. */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *shapeBufferFortran(int rank, int *shape) { int *stride = shape::calcStridesFortran(shape,rank); traceNew(12); shape::ShapeInformation * shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'f'; shapeInfo->elementWiseStride = elementWiseStride; int *shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; return shapeInfoBuffer; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *shapeBufferFortran(int rank, int *shape, int *output) { int stride[MAX_RANK]; shape::calcStridesFortran(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'f'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, output); return output; } /** * Compute the real linear indices for the given shape and stride */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF Nd4jIndex *computeIndices(int rank, int *shape, int *stride) { Nd4jIndex length = shape::prodLong(shape,rank); traceNew(13); Nd4jIndex *ret = new Nd4jIndex[length]; for(int i = 0; i < length; i++) { int *idx = shape::ind2sub(rank, shape, i); ret[i] = shape::getOffset(0, shape, stride, idx, rank); delete[] idx; } return ret; } /** * Compute the real linear indices for the given shape and stride */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF Nd4jIndex *computeIndices(int *shapeBuffer) { return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer)); } /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int sub2Ind(int rank, int *shape, int *indices) { int index = 0; int shift = 1; for(int i = 0; i < rank; i++) { index += shift * indices[i]; shift *= shape[i]; } return index; } template <typename T> #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void fill(T* buffer, T value, Nd4jIndex length) { #pragma omp simd for (int e = 0; e < length; e++) buffer[e] = value; } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int* ind2sub(int rank, int *shape, int index,int numIndices) { traceNew(14); int *ret = new 
int[rank]; ind2sub(rank, shape, index, numIndices, ret); return ret; } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int* ind2sub(int rank, int *shape, int index) { return ind2sub(rank,shape, index,shape::prodLong(shape,rank)); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void ind2sub(int rank, int *shape, int index, int numIndices, int *ret) { int denom = numIndices; for(int i = rank - 1; i >= 0; i--) { denom /= shape[i]; ret[i] = index / denom; index %= denom; } } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void ind2sub(int rank,int *shape,int index, int *out) { ind2sub(rank,shape, index,shape::prodLong(shape,rank),out); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int * ind2subC(int rank, int *shape, int index, int numIndices) { traceNew(15); int *ret = new int[rank]; ind2subC(rank, shape, index, numIndices, ret); return ret; } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *ind2subC(int rank, int *shape, int index) { return ind2subC(rank,shape, index, shape::prodLong(shape,rank)); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void ind2subC(int rank, int *shape, int index, int numIndices, int *ret) { int denom = numIndices; for(int i = 0; i < rank; i++) { denom /= shape[i]; if(denom > 0) { ret[i] = index / denom; index %= denom; } else ret[i] = 0; } } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. 
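 * Equivalent to calling ind2subC with numIndices = prodLong(shape, rank).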
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void ind2subC(int rank, int *shape, int index, int *out) {
        ind2subC(rank, shape, index, shape::prodLong(shape, rank), out);
    }

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void ind2subOrder(int *shapeInfo, int index, int numIndices, int *out) {
        if(shape::order(shapeInfo) == 'f') {
            shape::ind2sub(
                    shape::rank(shapeInfo),
                    shape::shapeOf(shapeInfo),
                    index,
                    numIndices,
                    out);
        }
        else {
            shape::ind2subC(
                    shape::rank(shapeInfo),
                    shape::shapeOf(shapeInfo),
                    index,
                    numIndices,
                    out);
        }
    }

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the prod of the shape)
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void ind2subOrder(int *shapeInfo, int index, int *out) {
        ind2subOrder(shapeInfo, index, shape::length(shapeInfo), out);
    }

/**
 *
 * @param length
 * @param shape
 * @param rearrange
 * @return
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *doPermuteSwap(int length, int *shape, int *rearrange) {

        traceNew(16);

        int *ret = new int[length];
        for (int i = 0; i < length; i++) {
            ret[i] = shape[rearrange[i]];
        }
        return ret;
    }

/**
 * In place permute swap
 * @param length
 * @param shape
 * @param rearrange
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void doPermuteSwap(int length, int **shape, int *rearrange) {
        bool inOrder = true;
        for(int i = 0; i < length - 1; i++) {
            inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1];
        }

        //all in order, nothing to do
        if(inOrder)
            return;

        int *shapeDeref = *shape;
        //we know they are just reversed, dimension length of 2
        if(length == 2) {
            int shapeFirst = shapeDeref[0];
            int shapeSecond = shapeDeref[1];
            shapeDeref[0] = shapeSecond;
            shapeDeref[1] = shapeFirst;
            return;
        }

        int *temp = new int[length];
        memcpy(temp, shapeDeref, sizeof(int) * length);
        for (int i = 0; i < length; i++) {
            shapeDeref[i] = temp[rearrange[i]];
        }
        delete[] temp;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void permuteShapeBufferInPlace(int *shapeBuffer, int *rearrange, int *out) {
        if(shapeBuffer != out)
            memcpy(out, shapeBuffer, sizeof(int) * shape::shapeInfoLength(shape::rank(shapeBuffer)));
        doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *permuteShapeBuffer(int *shapeBuffer, int *rearrange) {
        int len = shape::shapeInfoLength(shape::rank(shapeBuffer));
        int *copy = shape::copyOf(len, shapeBuffer);
        doPermuteShapeBuffer(copy, rearrange);
        return copy;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void doPermuteShapeBuffer(int *shapeBuffer, int *rearrange) {
        int *shapeRef = shapeBuffer;
        //rank of the rearrange array == rank of shape buffer
        int rearrageRank = shape::rank(shapeRef);
        int *shape = shape::shapeOf(shapeRef);
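        // permute the shape and stride sub-arrays in place, then invalidate the
        // cached element wise stride and recompute the order flag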
int *stride = shape::stride(shapeRef); shape::doPermuteSwap(rearrageRank,&shape,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrange); shapeRef[shapeInfoLength(rearrageRank) - 2] = -1; shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void doPermuteShapeBuffer(int *shapeBuffer,int *rearrange, int *tmpBuffer) { int *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = shape::rank(shapeRef); int *shape = shape::shapeOf(shapeRef); int *stride = shape::stride(shapeRef); shape::copyOf(rearrageRank,rearrange, tmpBuffer); shape::doPermuteSwap(rearrageRank,&shape,tmpBuffer); shape::copyOf(rearrageRank,rearrange, tmpBuffer); shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer); shapeRef[shapeInfoLength(rearrageRank) - 2] = -1; shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void doPermuteShapeBuffer(int rank,int *shapeBuffer,int *rearrange) { int *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = rank; int *shape = shape::shapeOf(shapeRef); int *stride = shape::stride(shapeRef); int *rearrangeCopy1 = shape::copyOf(rearrageRank,rearrange); shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1); delete[] rearrangeCopy1; int *rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrangeCopy2); shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1); shapeBuffer[shape::shapeInfoLength(rank) - 2] = -1; delete[] rearrangeCopy2; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void doPermuteShapeBuffer(int rank,int *shapeBuffer,int *rearrange, int *tmpBuffer) { int *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = rank; int *shape = shape::shapeOf(shapeRef); int *stride = shape::stride(shapeRef); if(shapeBuffer != tmpBuffer) shape::copyOf(rearrageRank,shapeBuffer, tmpBuffer); shape::doPermuteSwap(rearrageRank,&shape,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrange); shapeRef[shapeInfoLength(rank) - 2] = -1; shapeRef[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *createPermuteIndexes(int originalRank,int *dimension,int dimensionLength) { int delta = originalRank - dimensionLength; traceNew(17); int *ret = new int[originalRank]; for(int i = 0; i < delta; i++) { ret[i] = i + dimensionLength; } for(int i = delta; i < originalRank; i++) { ret[i] = i - delta; } return ret; } /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF char getOrder(int length, int *shape, int *stride, int elementStride) { int sd = -1; int dim = -1; int i = -1; int cContiguous = 1; int isFortran = 1; sd = 1; for (i = length - 1; i >= 0; --i) { dim = shape[i]; if (stride[i] != sd) { cContiguous = 0; break; } /* contiguous, if it got this far */ if (dim == 0) { break; } sd *= dim; } /* check if fortran contiguous */ sd = elementStride; for (i = 0; i < length; ++i) { dim = shape[i]; if (stride[i] != sd) { isFortran = 0; } if (dim == 0) { break; } sd *= dim; } if (isFortran && cContiguous) return 'a'; else if (isFortran && !cContiguous) return 'f'; else if (!isFortran && 
                 !cContiguous)
            return 'c';
        else
            return 'c';

    }

/**
 * Ensure that every value in the rearrange
 * array is unique
 * @param arr
 * @param arrLength
 * @param shapeLength
 * @return
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int checkArrangeArray(int *arr, int arrLength, int shapeLength) {
        if (arrLength != shapeLength)
            return -1;
        for (int i = 0; i < arrLength; i++) {
            if (arr[i] >= arrLength || arr[i] < 0)
                return -1;
        }
        for (int i = 0; i < arrLength; i++) {
            for (int j = 0; j < arrLength; j++) {
                if (i != j && arr[i] == arr[j])
                    return -1;
            }
        }
        return 1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void traceNew(int id) {
        //printf("new happened: [%i]\n", id);
#ifndef __CUDACC__
        //fflush(stdout);
#endif
    }

/**
 * Permute the shape information
 * @param info the shape information to permute
 * @param rearrange the order to rearrange
 * @param rank the rank of the rearrange array
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void permute(ShapeInformation **info, int *rearrange, int rank) {
        ShapeInformation *infoDeref = *info;
        checkArrangeArray(rearrange, rank, rank);
        shape::doPermuteSwap(rank, &infoDeref->shape, rearrange);
        shape::doPermuteSwap(rank, &infoDeref->stride, rearrange);
        char order = getOrder(rank,
                              infoDeref->shape,
                              infoDeref->stride,
                              infoDeref->elementWiseStride);
        infoDeref->order = order;
    }

/**
 * Returns whether the
 * given shape is a vector or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int isVector(int *shape, int rank) {
        if (rank == 1)
            return 1;
        if (rank > 2)
            return 0;
        else if (rank <= 2) {
            if (shape[0] == 1 || shape[1] == 1)
                return 1;
        }
        return 0;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF bool isLikeVector(int *shapeInfo) {
        int numOfNonUnity = 0;
        for(int i = 1; i <= shapeInfo[0]; ++i) {
            if(shapeInfo[i] != 1)
                ++numOfNonUnity;
        }
        return numOfNonUnity == 1 && shapeInfo[0] > 2;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int isVector(int *shapeInfo) {
        return isVector(shape::shapeOf(shapeInfo), shape::rank(shapeInfo));
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF bool isRowVector(int *shapeInfo) {
        bool isVector = shape::isVector(shapeInfo) == 1;
        bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1;
        return isVector && shapeFirstOne;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF bool isColumnVector(int *shapeInfo) {
        bool isVector = shape::isVector(shapeInfo) == 1;
        bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1;
        return isVector && !shapeFirstOne;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int oneDimEqualToLength(int *shape, int rank) {
        for(int i = 0; i < rank; i++) {
            if(shape[i] == shape::prod(shape, rank))
                return 1;
        }
        return 0;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int oneDimEqualToLength(int *shapeInfo) {
        return oneDimEqualToLength(shape::shapeOf(shapeInfo), shape::rank(shapeInfo));
    }

/**
 * Returns whether the
 * given shape is a matrix or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int isMatrix(int *shape, int rank) {
        if (rank > 2)
            return 0;
        else if (rank <= 2) {
            if (shape[0] == 1 || shape[1] == 1)
                return 0;
        }
        return 1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int isMatrix(int *shapeInfo) {
        return isMatrix(shape::shapeOf(shapeInfo), shape::rank(shapeInfo));
    }

/**
 * Returns the shape portion of an information
 * buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
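// Illustrative only: for a 2x3 c-ordered array the shape info buffer is
// {2, 2,3, 3,1, 0, 1, 'c'}; the accessors below simply index into that layout,
// e.g. shapeOf(buffer) == buffer + 1 and stride(buffer) == buffer + 1 + rank.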
    INLINEDEF int *shapeOf(int *buffer) {
        return buffer + 1;
    }

/**
 * Return a copy of a buffer.
 * This buffer allocates memory
 * that must be freed elsewhere.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *copyOf(int length, int *toCopy) {

        traceNew(18);

        int *ret = new int[length];
        return copyOf(length, toCopy, ret);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *copyOf(int length, int *toCopy, int *ret) {
        memcpy(ret, toCopy, sizeof(int) * length);
        return ret;
    }

/**
 * Copy one buffer to another.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void copyTo(int length, int *from, int *to) {
        memcpy(to, from, sizeof(int) * length);
    }

/**
 * Copy values from one buffer to another,
 * gathering through the given indexes.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF void copyTo(int length, int *from, int *to, int *indexes) {
        for(int i = 0; i < length; i++) {
            to[i] = from[indexes[i]];
        }
    }

/**
 * Permute the given strides
 * in the given rearrange order
 * @param toPermute the buffer to permute
 * @param shapeRank the length of the buffer to permute
 * @param rearrange the rearrange order (must be 0 based indexes
 * and all must be filled in)
 * @return the rearranged array
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *permutedStrides(int *toPermute, int shapeRank, int *rearrange) {
        int *strideCopy = copyOf(shapeRank, toPermute);
        checkArrangeArray(rearrange, shapeRank, shapeRank);
        int *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
        delete[] strideCopy;
        return newStride;
    }

/**
 * Return the slice (shape + 1 in pointer arithmetic)
 * @param shape the shape to take the slice of
 * @return the shape array without the first entry
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *slice(int *shape) {
        return shape + 1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int slices(int *shapeBuffer) {
        return shape::shapeOf(shapeBuffer)[0];
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *sliceOfShapeBuffer(int sliceIdx, int *shapeBuffer) {
        int rank = shape::rank(shapeBuffer);
        int newRank = rank - 1;
        if(newRank < 2)
            newRank = 2;
        int *newShapeBuffer = new int[shape::shapeInfoLength(newRank)];
        newShapeBuffer[0] = newRank;
        int *currShape = shape::shapeOf(shapeBuffer);
        int *currStride = shape::stride(shapeBuffer);
        //initialize new shape and stride by taking the shape and stride + 1
        //and adding to the shape information
        //a slice is always just taking the existing shape and cutting the first index off
        //of the shape and stride
        int *newShape = shape::shapeOf(newShapeBuffer);
        int *newStride = shape::stride(newShapeBuffer);
        if(shape::isVector(shapeBuffer)) {
            int *currShape = shape::shapeOf(shapeBuffer);
            //row vector: slice index 0 is a valid index, just copy the whole thing
            if(currShape[0] == 1) {
                if(sliceIdx == 0) {
                    memcpy(newShapeBuffer, shapeBuffer, shape::shapeInfoLength(shape::rank(shapeBuffer)) * sizeof(int));
                    return newShapeBuffer;
                }
            }
            //column vector: this will be a scalar
            else {
                delete[] newShapeBuffer;
                int *scalar = shape::createScalarShapeInfo();
                int offset = shape::offset(shapeBuffer);
                scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx;
                return scalar;
            }
        }
        else if(shape::isMatrix(shapeBuffer)) {
            newShape[0] = 1;
            newShape[1] = currShape[1];
            newStride[0] = 1;
            newStride[1] = currStride[1];
        }
        else {
            for(int i = 0; i < newRank; i++) {
                newShape[i] = currShape[i + 1];
                newStride[i] = currStride[i + 1];
            }
        }

        int *indices = new int[rank];
        memset((void *) indices, 0, rank * sizeof(int));
        indices[0] = sliceIdx;
        Nd4jIndex offset = shape::getOffset(0, newShape, newStride, indices, rank);
        newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset;
        if(shape::isMatrix(shapeBuffer)) {
            newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1];
        }
        else {
            newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer);
        }
        newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank, newShape, newStride, 1);
        delete[] indices;

        return newShapeBuffer;
    }

/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 4
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int shapeInfoLength(int rank) {
        //FIXME magic numbers
        return rank * 2 + 4;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int shapeInfoLength(int* shape) {
        return shapeInfoLength(shape[0]);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF size_t shapeInfoByteLength(int rank) {
        //FIXME magic numbers
        return (rank * 2 + 4) * sizeof(int);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF size_t shapeInfoByteLength(int* shapeInfo) {
        //FIXME magic numbers
        return shapeInfoByteLength(shapeInfo[0]);
    }

/**
 * Returns the rank portion of
 * an information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int rank(int *buffer) {
        return buffer[0];
    }

/**
 * Converts a raw int buffer of the layout:
 * rank
 * shape
 * stride
 * offset
 * elementWiseStride
 *
 * where shape and stride are both straight int pointers
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF ShapeInformation *infoFromBuffer(int *buffer) {

        traceNew(19);

        ShapeInformation *info = new ShapeInformation;
        int length = shapeInfoLength(rank(buffer));
        int rank = buffer[0];

        //start after rank
        info->shape = buffer + 1;
        info->stride = buffer + (1 + rank);
        info->rank = rank;
        info->offset = buffer[length - 3];
        info->elementWiseStride = buffer[length - 2];
        info->order = (char) buffer[length - 1];
        return info;
    }

/**
 * Returns the stride portion of an information
 * buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int *stride(int *buffer) {
        return buffer + (1 + rank(buffer));
    }

/**
 * Compute the length of the given shape
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF Nd4jIndex length(int *shapeInfo) {
        if (shape::rank(shapeInfo) == 0)
            return 1L;
        return shape::prodLong(shape::shapeOf(shapeInfo), shape::rank(shapeInfo));
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF Nd4jIndex length(std::initializer_list<int>& shape) {
        Nd4jIndex ret = 1;
        for (auto v : shape) {
            ret *= v;
        }
        return ret;
    }

/**
 * Returns the offset portion of an information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int offset(int *buffer) {
        return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
    }

/**
 * Returns the ordering
 * for this shape information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF char order(int *buffer) {
        //FIXME magic numbers
        return (char) buffer[(buffer[0] * 2 + 4) - 1];
    }

/**
 * Returns the element wise stride for this information
 * buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF int elementWiseStride(int *buffer) {
        return buffer[shapeInfoLength(buffer[0]) - 2];
    }

/**
 * Returns the element wise stride for this information
 * buffer relative to a dimension and reduction index
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    INLINEDEF
int reductionIndexElementWiseStride(int *buffer, int *dimension, int dimensionLength) { if(dimensionLength > 1) { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { //int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; //return tadElementWiseStride; int tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } return 1; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } return 1; } } else { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ int tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. 
*/ int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } } } /** * Returns whether * the given shape info buffer * represents a scalar shape */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int isScalar(int *info) { if (shape::rank(info) == 0) return 1; if (shape::rank(info) > 2) return 0; if (shape::rank(info) == 1) return shape::shapeOf(info)[0] == 1; else if (rank(info) == 2) { return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1; } return 0; } /** * Returns whether * the given shape information * represents a scalar * shape or not */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int isScalar(volatile ShapeInformation *info) { if (info->rank > 2) return 0; if (info->rank == 1) return info->shape[0] == 1; else if (info->rank == 2) { return info->shape[0] == 1 && info->shape[1] == 1; } return 0; } /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void removeIndex(int *data, int *indexes, int dataLength, int indexesLength, int *ret) { int count = 0; int absLength = dataLength - indexesLength; for (int i = 0; i < dataLength && count < absLength; i++) { int contains = 0; for (int j = 0; j < indexesLength; j++) { if (i == indexes[j]) { contains = 1; break; } } if (!contains) { ret[count] = data[i]; count++; } } } /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int * removeIndex(int *data, int *indexes, int dataLength, int indexesLength) { int lengthOfArr = dataLength - indexesLength; if(lengthOfArr < 0) { printf("Remove index call created a <= 0 length array. This was likely not intended."); } int *ret = new int[lengthOfArr]; removeIndex(data,indexes,dataLength,indexesLength,ret); return ret; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int* everyIndexBut(int *indexes,int indexesLength,int begin,int end) { int len = end - indexesLength; traceNew(20); int *ret = new int[len]; int retIdx = 0; //not here that we do 0 based indexing for end - this assumes things like: //0 to 4 are specified for(int i = begin; i < end ; i++) { bool found = false; for(int j = 0; j < indexesLength; j++) { if(indexes[j] == i) { found = true; break; } } if(!found) { ret[retIdx++] = i; } } return ret; } /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ #ifdef __CUDACC__ INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) { return offset + threadIdx.x * xInfo->elementWiseStride; } #endif /** * Returns a shape * forces the given length to be 2. 
* @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *ensureVectorShape(int *shape, int dimension) { traceNew(21); int *ret = new int[2]; if (dimension == 0) { ret[0] = 1; ret[1] = shape[0]; } else { ret[0] = shape[0]; ret[1] = 1; } return ret; } /** * Returns a shape * forces the given length to be 2. * @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *ensureVectorShape(int *shape) { return ensureVectorShape(shape, 0); } /** * This method does STRICT comparison for two shape buffers * * @param shape * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF bool equalsStrict(int *shapeA, int *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we do full comparison here int length = shape::shapeInfoLength(shapeA[0]); for (int e = 1; e < length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int sizeAt(int *shape, int dim) { if (dim >= 0) return shape[1+dim]; else return shape[1+(rank(shape) + dim)]; } /** * This method does SOFT comparison for two shape buffers, we compare only rank & shapes * * @param shape * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF bool equalsSoft(int *shapeA, int *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we compare only shapes, and ignoring stride & ews int length = shapeA[0]; for (int e = 1; e <= length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } /** * Generate an int buffer * up to the given length * at the specified increment * */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *range(int from, int to, int increment) { int diff = nd4j::math::nd4j_abs<int>(from - to); int retLength = diff / increment; int *ret; traceNew(22); if(diff / increment < 1) ret = new int[1]; else ret = new int[diff / increment]; if (from < to) { int count = 0; for (int i = from; i < to; i += increment) { if (count >= retLength) break; ret[count++] = i; } } else if (from > to) { int count = 0; for (int i = from - 1; i >= to; i -= increment) { if (count >= retLength) break; ret[count++] = i; } } return ret; } /** * Generate a range * beginning at from and ending at to * incrementing by 1 * @param from the start * @param to the end * @return the int array starting at from and ending at to */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *range(int from, int to) { return range(from, to, 1); } /** * Keep the given indexes in the data * @param data * @param index * @param indexLength * @param dataLength * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *keep(volatile int *data, int *index, int indexLength, int dataLength) { traceNew(23); int *ret = new int[indexLength]; int count = 0; for (int i = 0; i < dataLength; i++) { int contains = 0; for (int j = 0; j < indexLength; j++) { if (i == index[j]) { contains = 1; break; } } if (contains) ret[count++] = data[i]; } return ret; } /** * Generate a reverse * copy of the data */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *reverseCopy(int *data, int length) { if (length < 1) return nullptr; traceNew(24); int *copy = new int[length]; for (int i = 0; i <= length / 2; i++) { int temp = data[i]; 
copy[i] = data[length - i - 1]; copy[length - i - 1] = temp; } return copy; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void reverseCopyTo(int *from, int *to, int length) { if (length < 1) return; for (int i = 0; i <= length / 2; i++) { int temp = from[i]; to[i] = from[length - i - 1]; to[length - i - 1] = temp; } } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void reverseCopyTo(int *from, int *to, int *indexes, int length) { if (length < 1) return; for (int i = 0; i <= length / 2; i++) { int temp = from[indexes[i]]; to[i] = from[indexes[length - i - 1]]; to[length - i - 1] = temp; } } /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *concat(int *arr1, int arr1Length, int *arr2, int arr2Length) { traceNew(25); int *ret = new int[arr1Length + arr2Length]; std::memcpy(ret, arr1, arr1Length * sizeof(int)); std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(int)); return ret; } /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *concat(int numArrays, int numTotalElements, int **arr, int *lengths) { int *ret = new int[numTotalElements]; int count = 0; for (int i = 0; i < numArrays; i++) { for (int j = 0; j < lengths[i]; j++) { ret[count++] = arr[i][j]; } } return ret; } /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * @return the length per slice of the given shape * along the given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int lengthPerSlice(int rank, int *shape, int *dimension, int dimensionLength) { if(shape::isVector(shape,rank)) { //return total length for row vectors if(dimensionLength == 1 && shape[0] == 1) { return shape::prod(shape,rank); } } else if(rank == dimensionLength) return shape::prod(shape,rank); int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength); traceNew(27); int *ret2 = shape::removeIndex(shape,dimension,rank,dimensionLength); int ret = prod(ret2, absSelta); delete[] ret2; return ret; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int sliceOffsetForTensor(int rank, int index, int *shape, int *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength) { int tensorLength = prodLong(tensorShape, tensorShapeLength); int lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength); if (lengthPerSlice2 <= 0) { return 0; } int offset = index * tensorLength / lengthPerSlice2; return offset; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) { int offset = index * tensorLength / lengthPerSlice2; return offset; } #ifdef __CUDACC__ /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. 
*/ INLINEDEF __device__ int tadOffset(int *xInfo, int offset) { return offset + threadIdx.x * elementWiseStride(xInfo); } #endif /** * Computes the number * of tensors along * a given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int tensorsAlongDimension(volatile int rank, volatile int length, volatile int *shape, int *dimension, int dimensionLength) { int *tensorShape = shape::keep(shape, dimension, dimensionLength, rank); int ret = length / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Computes the number * of tensors along * a given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength) { int *keepShape = shape::shapeOf(shapeInfo); int *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo)); int ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF Nd4jIndex getOffset(Nd4jIndex baseOffset, int *shape, int *stride, int *indices, int rank) { Nd4jIndex offset = baseOffset; for(int i = 0; i < rank; i++) { if(indices[i] >= shape[i] && shape[i] != 1) { printf("Index %d [%d] must not be >= shape[%d].\n", i,indices[i],shape[i]); return -1; } if(shape[i] != 1) { offset += (Nd4jIndex) indices[i] * (Nd4jIndex) stride[i]; } } return offset; } /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ #ifdef __CUDACC__ __device__ __host__ #endif INLINEDEF int tadForBlockIndex(int blockSize, int blockIdx, int i) { return blockIdx + i * blockSize; } /** * Computes the number of tads per block * */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int tadsPerBlock(int blockSize, int tads) { return nd4j::math::nd4j_ceil<double>(tads / (double) blockSize); } /** * Returns a shape buffer * for the shape information metadata. 
*/ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *toShapeBuffer( ShapeInformation *info) { traceNew(29); int *ret = new int[shapeInfoLength(info->rank)]; int count = 1; int rank = info->rank; ret[0] = info->rank; for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count++] = info->order; return ret; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *toShapeBuffer( ShapeInformation *info, int* ret) { int count = 1; int rank = info->rank; ret[0] = info->rank; if (ret[0] == 0) { ret[1] = 0; ret[2] = 1; ret[3] = 99; return ret; } for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count++] = info->order; return ret; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void printIntArray(int *arr,int length) { for(int i = 0; i < length; i++) { printf(" %d ",arr[i]); } printf("\n"); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void printShapeInfo(int *shapeInfo) { int rank = shape::rank(shapeInfo); int *shape = shape::shapeOf(shapeInfo); printf("Rank %d\n",rank); printf("Shape:\n"); for(int i = 0; i < rank; i++) { printf(" %d ",shape[i]); } printf("\n"); int *stride = shape::stride(shapeInfo); printf("Stride:\n"); for(int i = 0; i < rank; i++) { printf(" %d ",stride[i]); } printf("\n"); printf("Order %c\n",shape::order(shapeInfo)); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void printShapeInfoLinear(int *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("ShapeInfo: ["); for (int i = 0; i < lim; i++) { printf("%i", shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void printArray(float *arr,int length) { printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); } /** * Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int tadIndex(int i, int elementWiseStride, int numElementsPerTad) { return i / (numElementsPerTad * elementWiseStride); } /** * Map a tad to a * reduction index. 
* @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal) { if (tadIndexForOriginal == 0) return 0; return tadIndexForOriginal / (tadsForOriginal / tadsForReduced); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF void transposeInplace(int *shapeBuffer) { int rank = shape::rank(shapeBuffer); int *shape = shape::shapeOf(shapeBuffer); int *strides = shape::stride(shapeBuffer); // swap shape for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = shape[idx2]; shape[idx2] = shape[idx1]; shape[idx1] = tmp; } // swap strides for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = strides[idx2]; strides[idx2] = strides[idx1]; strides[idx1] = tmp; } if (shape::order(shapeBuffer) == 'c') shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102; else shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99; } /** * Tad index for linear * @param linearIndex * @param tadLength * @return */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int tadIndexForLinear(int linearIndex, int tadLength) { return linearIndex % tadLength; } /** * Computes the number of tads * per reduce index for the * reduction tad. */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) { return tadsForOriginal / tadsForReduce; } /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum) { int tad = tadIndex(i, elementWiseStride, numElementsPerTad); return reductionIndexForTad(tad, tadNum, originalTadNum); } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int* createScalarShapeInfo() { traceNew(30); int *shape = new int[2]; shape[0] = 1; shape[1] = 1; int *stride = new int[2]; stride[0] = 1; stride[1] = 1; ShapeInformation *shapeInformation2 = new ShapeInformation(); shapeInformation2->rank = 2; shapeInformation2->offset = 0; shapeInformation2->stride = stride; shapeInformation2->shape = shape; shapeInformation2->elementWiseStride = 1; shapeInformation2->order = 99; int *ret = shape::toShapeBuffer(shapeInformation2); delete shapeInformation2; delete[] shape; delete[] stride; return ret; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int* createScalarShapeInfo(int *ret) { ret[0] = 2; ret[1] = 1; ret[2] = 1; ret[3] = 1; ret[4] = 1; ret[5] = 0; ret[6] = 1; ret[7] = 99; return ret; } /** * Returns the prod of the data * up to the given length */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int prod(int *data, int length) { int prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } /** * Returns the prod of the data * up to the given length */ #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF Nd4jIndex prodLong( int *data, int length) { Nd4jIndex prod = 1; for (int i 
= 0; i < length; i++) { prod *= data[i]; } return prod; } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int rearMostLeftOverItem(int *data, int *dimension,int dimensionLength) { int *stride = shape::stride(data); //corner case: return the final item when its greater than the max, since its guaranteed to be left over //note here that strides are interpreted in reverse for tad //start from the front rather than the back int rank = shape::rank(data); if(shape::order(data) == 'f') { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } else { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } int ret = stride[0]; return ret; } #ifdef __CUDACC__ __device__ INLINEDEF void sweepShapeInfoBuffer(int *shapeInfoBuffer, int *targetBuffer) { // we read first element, to find out length of our shapeInfoBuffer int rank = shapeInfoBuffer[0]; int len = shape::shapeInfoLength(rank); for (int i = threadIdx.x; i < len; i += blockDim.x) targetBuffer[i] = shapeInfoBuffer[i]; } #endif #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *shapeBufferOfNpy(cnpy::NpyArray arr) { return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int* )arr.shape.data(),arr.fortranOrder); } //#ifdef __CUDACC__ // __host__ __device__ //#endif // INLINEDEF int *shapeBufferOfNpyBuffer(char *buffer) { // unsigned int *shape; // unsigned int ndims, wordSize; // bool fortranOrder; // cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder); // int * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder); // delete[] shape; // return ret; // } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF int *shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder) { if(fortranOrder) { int *shapeBufferRet = shape::shapeBufferFortran(rank,(int *) shape); return shapeBufferRet; } else { int *newShape = new int[rank]; for(int i = 0; i < rank; i++) { newShape[i] = shape[i]; } int *shapeBufferRet = shape::shapeBuffer(rank,newShape); delete[] newShape; return shapeBufferRet; } } #ifdef __CUDACC__ __host__ __device__ #endif INLINEDEF bool strideDescendingCAscendingF(int *shapeBuffer) { int rank = shape::rank(shapeBuffer); int *strides = shape::stride(shapeBuffer); char order = shape::order(shapeBuffer); if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1) return true; if (order == 'c') { for (int i = 1; i < rank; i++) if (strides[i-1] <= strides[i]) return false; return true; } else if (order == 'f') { for (int i = 1; i < rank; i++) if (strides[i-1] >= strides[i]) return false; return true; } else { printf("Unknown order for array!\n"); return false; } } #ifdef 
__CUDACC__ __host__ #endif INLINEDEF bool reshapeCF(const int oldRank, int* oldShape, const int newRank, int* newShapeOf, bool isFOrder, int* target) { int oldnd; int* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); int* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape)); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; int* newStrides = new int[newRank]; oldnd = 0; /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. */ for (oi = 0; oi < oldRank; oi++) { if (shape::shapeOf(oldShape)[oi] != 1) { olddims[oldnd] = shape::shapeOf(oldShape)[oi]; oldstrides[oldnd] = shape::stride(oldShape)[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newRank; ni++) { np *= newShapeOf[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newRank && oi < oldnd) { np = newShapeOf[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShapeOf[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; } } ni = nj++; oi = oj++; } if (ni >= 1) { last_stride = newStrides[ni - 1]; } else { last_stride = shape::elementWiseStride(oldShape); } if (isFOrder && ni >= 1) { last_stride *= newShapeOf[ni - 1]; } for (nk = ni; nk < newRank; nk++) { newStrides[nk] = last_stride; } target[0] = newRank; int cnt = 1; for (int e = 0; e < newRank; e++) target[cnt++] = newShapeOf[e]; for (int e = 0; e < newRank; e++) target[cnt++] = newStrides[e]; target[shape::shapeInfoLength(newRank) - 3] = 0; target[shape::shapeInfoLength(newRank) - 2] = -1; target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99; delete[] olddims; delete[] oldstrides; delete[] newStrides; return true; } #ifdef __CUDACC__ __host__ #endif INLINEDEF bool canReshape(const int oldRank, int* oldShape, const int newRank, int* newShapeOf, bool isFOrder) { int oldnd; int* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); int* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape)); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; int* newStrides = new int[newRank]; oldnd = 0; /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. 
*/ for (oi = 0; oi < oldRank; oi++) { if (shape::shapeOf(oldShape)[oi] != 1) { olddims[oldnd] = shape::shapeOf(oldShape)[oi]; oldstrides[oldnd] = shape::stride(oldShape)[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newRank; ni++) { np *= newShapeOf[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newRank && oi < oldnd) { np = newShapeOf[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShapeOf[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; } } ni = nj++; oi = oj++; } delete[] olddims; delete[] oldstrides; delete[] newStrides; return true; } #ifdef __CUDACC__ __host__ #endif // this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions) // also sort input array of dimensions, this operation is also necessary for creating TAD object INLINEDEF void checkDimensions(const int rank, std::vector<int>& dimensions) { int dimSize = dimensions.size(); if(dimSize == 0) throw "shape::checkDimensions method: array of dimensions is empty!"; // check presence of negative dimensions and if they are present transform them to positive ones -dim -> rank - |dim| for(auto& dim : dimensions) if(dim < 0) dim += rank; // sort input array of dimensions, this operation is also necessary for creating TAD object in external methods if (dimSize > 1) { std::sort(dimensions.begin(), dimensions.end()); // remove duplicates if they are present dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end()); } // check whether number of dimensions is to big (>rank) dimSize = dimensions.size(); if(dimSize > rank) throw "shape::checkDimensions method: number of input dimensions is too big ( > rank of array) !"; // check if min dimension is still negative and whether max dimension is bigger then rank-1 if(dimensions[0] < 0 || dimensions.back() > (rank-1)) throw "shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !"; return; } #ifdef __CUDACC__ __host__ #endif // return absolute index of array min, min is sub-array of max, index to be returned is min's index and corresponds to maxIdx of max array INLINEDEF Nd4jIndex subArrayIndex(const int* 
maxShapeInfo, const int* minShapeInfo, const int maxIdx) { int *idxPerRank = new int[maxShapeInfo[0]]; ind2subC(maxShapeInfo[0], const_cast<int*>(maxShapeInfo)+1, const_cast<int&>(maxIdx), idxPerRank); Nd4jIndex minIdx = 0; for(int i = 0; i < minShapeInfo[0]; ++i) { if(minShapeInfo[minShapeInfo[0] - i] == 1 || idxPerRank[maxShapeInfo[0] - i - 1] == 0) continue; if(idxPerRank[maxShapeInfo[0] - i - 1] >= minShapeInfo[minShapeInfo[0] - i]) idxPerRank[maxShapeInfo[0] - i - 1] %= minShapeInfo[minShapeInfo[0] - i]; minIdx += idxPerRank[maxShapeInfo[0] - i - 1] * stride(const_cast<int*>(minShapeInfo))[minShapeInfo[0] - i - 1]; } delete[] idxPerRank; return minIdx; } } #endif /* SHAPE_H_ */
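/*
 * Illustrative sketch (not part of the original header): how the flat
 * shape-info buffer manipulated above is laid out for a 2x3 C-ordered
 * array, and how getOffset() walks it. The layout is
 * [rank, shape..., stride..., offset, elementWiseStride, order], i.e.
 * rank * 2 + 4 ints. Assumes these helpers live in namespace shape and
 * that the header is available as "shape.h"; disabled with #if 0 so it
 * does not alter the build:
 */
#if 0
#include <cstdio>
#include "shape.h"

int main() {
    //            rank  shape   stride  offset ews order ('c' == 99)
    int info[] = { 2,    2, 3,   3, 1,   0,     1,  99 };

    // shapeOf()/stride() are plain pointer offsets into the buffer
    std::printf("rows=%d cols=%d\n",
                shape::shapeOf(info)[0], shape::shapeOf(info)[1]);

    // offset of element (1,2): 1*3 + 2*1 = 5
    int idx[] = {1, 2};
    std::printf("offset(1,2)=%lld\n",
                (long long) shape::getOffset(0, shape::shapeOf(info),
                                             shape::stride(info), idx, 2));
    return 0;
}
#endif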
data.c
#include <stdio.h> #include <omp.h> int main() { int var = 1; printf("Variable before parallel region: %i\n", var); #pragma omp parallel private(var) { var = 0; printf("Variable at first: %i\n", var); var = omp_get_thread_num(); printf("Variable after `get_thread_num()`: %i\n", var); } printf("Variable after parallel region: %i\n", var); return 0; }
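/*
 * Companion sketch (not in the original file): the program above relies
 * on the fact that private(var) gives each thread an *uninitialized*
 * copy, which is why it assigns var = 0 before reading it. With
 * firstprivate(var) each copy instead starts from the value the
 * variable had on entry to the region. Kept in #if 0 since it is a
 * second main():
 */
#if 0
#include <stdio.h>
#include <omp.h>

int main() {
    int var = 1;
    #pragma omp parallel firstprivate(var)
    {
        /* every thread starts from the outer value 1 */
        printf("Thread %d starts with var = %i\n", omp_get_thread_num(), var);
        var += 10; /* modifications stay thread-local */
    }
    printf("Variable after parallel region: %i\n", var); /* still 1 */
    return 0;
}
#endif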
util.c
/* * Copyright © 2011 Siarhei Siamashka <siarhei.siamashka@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <string.h> #include <stdint.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #include <omp.h> #include "util.h" void aligned_block_copy(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t t1, t2, t3, t4; ssize_t offset = (ssize_t)size; while ((offset -= 64) >= 0) { t1 = *src++; t2 = *src++; t3 = *src++; t4 = *src++; *dst++ = t1; *dst++ = t2; *dst++ = t3; *dst++ = t4; t1 = *src++; t2 = *src++; t3 = *src++; t4 = *src++; *dst++ = t1; *dst++ = t2; *dst++ = t3; *dst++ = t4; } } void aligned_block_copy_backwards(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t t1, t2, t3, t4; ssize_t offset = (ssize_t)size; src += size / 8 - 1; dst += size / 8 - 1; while ((offset -= 64) >= 0) { t1 = *src--; t2 = *src--; t3 = *src--; t4 = *src--; *dst-- = t1; *dst-- = t2; *dst-- = t3; *dst-- = t4; t1 = *src--; t2 = *src--; t3 = *src--; t4 = *src--; *dst-- = t1; *dst-- = t2; *dst-- = t3; *dst-- = t4; } } /* * Walk memory addresses in the backwards direction, but still * copy each individual 32 byte block in the forward direction. */ void aligned_block_copy_backwards_bs32(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t t1, t2, t3, t4; ssize_t offset = (ssize_t)size; src += size / 8 - 8; dst += size / 8 - 8; while ((offset -= 64) >= 0) { t1 = src[4]; t2 = src[5]; t3 = src[6]; t4 = src[7]; dst[4] = t1; dst[5] = t2; dst[6] = t3; dst[7] = t4; t1 = src[0]; t2 = src[1]; t3 = src[2]; t4 = src[3]; dst[0] = t1; dst[1] = t2; dst[2] = t3; dst[3] = t4; src -= 8; dst -= 8; } } /* * Walk memory addresses in the backwards direction, but still * copy each individual 64 byte block in the forward direction. 
*/ void aligned_block_copy_backwards_bs64(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t t1, t2, t3, t4; ssize_t offset = (ssize_t)size; src += size / 8 - 8; dst += size / 8 - 8; while ((offset -= 64) >= 0) { t1 = src[0]; t2 = src[1]; t3 = src[2]; t4 = src[3]; dst[0] = t1; dst[1] = t2; dst[2] = t3; dst[3] = t4; t1 = src[4]; t2 = src[5]; t3 = src[6]; t4 = src[7]; dst[4] = t1; dst[5] = t2; dst[6] = t3; dst[7] = t4; src -= 8; dst -= 8; } } void aligned_block_copy_pf32(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t t1, t2, t3, t4; ssize_t offset = (ssize_t)size; while ((offset -= 64) >= 0) { __builtin_prefetch(src + 32, 0, 0); t1 = *src++; t2 = *src++; t3 = *src++; t4 = *src++; *dst++ = t1; *dst++ = t2; *dst++ = t3; *dst++ = t4; __builtin_prefetch(src + 32, 0, 0); t1 = *src++; t2 = *src++; t3 = *src++; t4 = *src++; *dst++ = t1; *dst++ = t2; *dst++ = t3; *dst++ = t4; } } void aligned_block_copy_pf64(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t t1, t2, t3, t4; ssize_t offset = (ssize_t)size; while ((offset -= 64) >= 0) { __builtin_prefetch(src + 32, 0, 0); t1 = *src++; t2 = *src++; t3 = *src++; t4 = *src++; *dst++ = t1; *dst++ = t2; *dst++ = t3; *dst++ = t4; t1 = *src++; t2 = *src++; t3 = *src++; t4 = *src++; *dst++ = t1; *dst++ = t2; *dst++ = t3; *dst++ = t4; } } void aligned_block_fetch(int64_t *__restrict dst, int64_t *__restrict src_, size_t size) { volatile int64_t *src = src_; ssize_t offset = (ssize_t)size; while ((offset -= 64) >= 0) { *src++; *src++; *src++; *src++; *src++; *src++; *src++; *src++; } } void aligned_block_fill(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t data = *src; ssize_t offset = (ssize_t)size; while ((offset -= 64) >= 0) { *dst++ = data; *dst++ = data; *dst++ = data; *dst++ = data; *dst++ = data; *dst++ = data; *dst++ = data; *dst++ = data; } } /* * Simulate reshuffled memory write accesses to the destination * buffer (a kind of "drunken master style" access pattern). * * See: https://github.com/ssvb/tinymembench/issues/7 */ void aligned_block_fill_shuffle16(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t data = *src; ssize_t offset = (ssize_t)size; while ((offset -= 64) >= 0) { dst[0 + 0] = data; dst[1 + 0] = data; dst[1 + 2] = data; dst[0 + 2] = data; dst[1 + 4] = data; dst[0 + 4] = data; dst[0 + 6] = data; dst[1 + 6] = data; dst += 8; } } void aligned_block_fill_shuffle32(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t data = *src; ssize_t offset = (ssize_t)size; while ((offset -= 64) >= 0) { dst[3 + 0] = data; dst[0 + 0] = data; dst[2 + 0] = data; dst[1 + 0] = data; dst[3 + 4] = data; dst[0 + 4] = data; dst[2 + 4] = data; dst[1 + 4] = data; dst += 8; } } void aligned_block_fill_shuffle64(int64_t *__restrict dst_, int64_t *__restrict src, size_t size) { volatile int64_t *dst = dst_; int64_t data = *src; ssize_t offset = (ssize_t)size; while ((offset -= 64) >= 0) { dst[5] = data; dst[2] = data; dst[7] = data; dst[6] = data; dst[1] = data; dst[3] = data; dst[0] = data; dst[4] = data; dst += 8; } } double gettime(void) { struct timeval tv; gettimeofday(&tv, NULL); return (double)((int64_t)tv.tv_sec * 1000000 + tv.tv_usec) / 1000000.; } double fmin(double a, double b) { return a < b ? 
a : b; } #define ALIGN_PADDING 0x100000 #define CACHE_LINE_SIZE 128 static char *align_up(char *ptr, int align) { return (char *)(((uintptr_t)ptr + align - 1) & ~(uintptr_t)(align - 1)); } void *alloc_four_nonaliased_buffers(void **buf1_, size_t size1, void **buf2_, size_t size2, void **buf3_, size_t size3, void **buf4_, size_t size4) { char **buf1 = (char **)buf1_, **buf2 = (char **)buf2_; char **buf3 = (char **)buf3_, **buf4 = (char **)buf4_; int antialias_pattern_mask = (ALIGN_PADDING - 1) & ~(CACHE_LINE_SIZE - 1); char *buf, *ptr; if (!buf1 || size1 < 0) size1 = 0; if (!buf2 || size2 < 0) size2 = 0; if (!buf3 || size3 < 0) size3 = 0; if (!buf4 || size4 < 0) size4 = 0; ptr = buf = (char *)malloc(size1 + size2 + size3 + size4 + 9 * ALIGN_PADDING); memset(buf, 0xCC, size1 + size2 + size3 + size4 + 9 * ALIGN_PADDING); ptr = align_up(ptr, ALIGN_PADDING); if (buf1) { *buf1 = ptr + (0xAAAAAAAA & antialias_pattern_mask); ptr = align_up(*buf1 + size1, ALIGN_PADDING); } if (buf2) { *buf2 = ptr + (0x55555555 & antialias_pattern_mask); ptr = align_up(*buf2 + size2, ALIGN_PADDING); } if (buf3) { *buf3 = ptr + (0xCCCCCCCC & antialias_pattern_mask); ptr = align_up(*buf3 + size3, ALIGN_PADDING); } if (buf4) { *buf4 = ptr + (0x33333333 & antialias_pattern_mask); } return buf; } void stream_copy(int64_t *__restrict dst, int64_t *__restrict src, size_t size) { #pragma omp parallel for for (size_t j = 0; j < size / sizeof(int64_t); j++) { dst[j] = src[j]; } }
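/*
 * Usage sketch (not in the original file): timing one of the copy
 * routines above with gettime(). alloc_four_nonaliased_buffers()
 * returns the raw allocation, which is what must be free()d; buf1 and
 * buf2 are aligned, cache-offset views inside it. The 1 MiB size and
 * the single-shot measurement are illustrative only; real runs would
 * repeat and take the minimum. Kept in #if 0 since it is a second
 * main():
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include "util.h"

int main(void)
{
    size_t size = 1024 * 1024;  /* bytes; the copy loops work in 64-byte chunks */
    void *buf1, *buf2;
    void *pool = alloc_four_nonaliased_buffers(&buf1, size, &buf2, size,
                                               NULL, 0, NULL, 0);
    double t1 = gettime();
    aligned_block_copy((int64_t *)buf1, (int64_t *)buf2, size);
    double t2 = gettime();
    printf("aligned_block_copy: %.1f MB/s\n", (double)size / (t2 - t1) / 1e6);
    free(pool);
    return 0;
}
#endif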
life.c
/** * @author Zachary M. Mattis * ECE 1166 * OpenMP Conway's Game of Life * February 18, 2019 * * Purpose: * The game of life! A file is provided by the user, * a square grid of o's and x's, symbolizing an initial * grid of either a live or a dead cell. The rules * of the game are that a live cell having either 2 or * 3 neighbors lives on, more or less than that it dies. * A dead cell with exactly 3 neighbors lives on to the next * generation. The grid is a torus, so cells on the edge * of the grid are adjacent to their opposite cells. * * Compile: * gcc -Wall -o life.x life.c -lm -fopenmp * Usage: * ./life <cores> <infile name> <# of iterations> <print frequency> * Input: * cores - number of cores to use during execution * infile name - the initial map name as a file, elements, o= alive, x=dead * iterations - total number of steps to run the simulation * print frequency - the view update frequency (must be >= iterations) * * Output: * The grid at the final step multiple of the state given by d in the update frequency * */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include <omp.h> #include <unistd.h> // 1 = alive, 0 = dead typedef unsigned char cell; #define ALIVE 'o' #define DEAD 'x' #define KRED "\x1B[31m" #define KGRN "\x1B[36m" #define RESET "\x1B[0m" #define IS_ALIVE(x) (x == ALIVE) #define IS_DEAD(x) (x == DEAD) #define ARG_CORES 1 #define ARG_FILE_NAME 2 #define ARG_NUM_ITERATIONS 3 #define ARG_PRINT_FREQ 4 #define MILLISECONDS 1000 /*--------------------------------------------------------------------- * Function : getRowsAndCols * Purpose : Reads the input file given by the user and automatically * calculates how many rows and columns there are * In args * filename : name of input file * In/Out args * rows, columns : # rows and # columns integer pointers */ void getRowsAndCols(const char *filename, int *rows, int *cols); /*--------------------------------------------------------------------- * Function : initializeGrid * Purpose : Reads the input file given by the user and automatically * calculates how many rows and columns there are * In args * filename : name of input file * In/Out args * grid : grid of cells, preallocated */ void initializeGrid(cell *grid, const char *filename); /** * [printGrid prints game of life grid with color in bash] * @param grid [grid of cells] * @param rows [number of rows in the grid] * @param cols [number of columns in the grid] * @param step [step number that is being printed] * @param outputFile [file to print to] */ void printGrid(cell *grid, const int rows, const int cols, const int step, FILE *outputFile); /*--------------------------------------------------------------------- * Function : getNeighborCount * Purpose : Calculates the number of neighboring cells alive * for a given cell * In args * lifeGrid : ptr to life grid * rows : # of rows in life grid * cols : # of cols in life grid * x : x position of current cell * y : y position of current cell * Return * : # of neighboring cells alive */ int getNeighborCount(cell *lifeGrid, const int rows, const int cols, int x, int y); int main(int argc, char **argv) { if (argc != 5) { fprintf(stderr, "usage: ./life <cores> <infile name> <# of iterations> <print frequency> \n"); exit(EXIT_FAILURE); } double time_start, time_end; time_start = omp_get_wtime(); cell *lifeGrid, *tempGrid, *swapPtr; // ROW-MAJOR ORDER int rows = 0, cols = 0, neighborCount = 0, i = 0, j = 0, k = 0; const int cores = atoi(argv[ARG_CORES]); 
getRowsAndCols(argv[ARG_FILE_NAME], &rows, &cols); const int num_steps = atoi(argv[ARG_NUM_ITERATIONS]); const int print_frequency = atoi(argv[ARG_PRINT_FREQ]); char *outfile_name = calloc(100, sizeof(char)); snprintf(outfile_name, 100, "%s_%s_%s_%s.out", argv[ARG_CORES], argv[ARG_FILE_NAME], argv[ARG_NUM_ITERATIONS], argv[ARG_PRINT_FREQ]); lifeGrid = calloc(rows * cols, sizeof(cell)); // life grid tempGrid = calloc(rows * cols, sizeof(cell)); // temp grid for calculations initializeGrid(lifeGrid, argv[ARG_FILE_NAME]); for ( ; i < num_steps; i++) { if (i % print_frequency == 0) { printGrid(lifeGrid, rows, cols, i, stdout); } // Game of Life #pragma omp parallel for private(j, k, neighborCount) shared (lifeGrid, tempGrid, rows, cols) default (none) for (j=0; j<rows; j++ ) { //printf("Hello x%d from thread: %d\n", j, omp_get_thread_num() ); for (k=0; k<cols; k++) { neighborCount = getNeighborCount( lifeGrid, rows, cols, j, k ); if(neighborCount == 2) tempGrid[j*cols+k] = lifeGrid[j*cols+k]; if(neighborCount == 3) tempGrid[j*cols+k] = ALIVE; if(neighborCount < 2) tempGrid[j*cols+k] = DEAD; if(neighborCount > 3) tempGrid[j*cols+k] = DEAD; } } // swap grids swapPtr = lifeGrid; lifeGrid = tempGrid; tempGrid = swapPtr; } FILE *outfile = fopen(outfile_name, "w"); printGrid(lifeGrid, rows, cols, i, outfile); // end conways game of life fclose(outfile); free(outfile_name); free(tempGrid); free(lifeGrid); time_end = omp_get_wtime(); printf("Execution Time (s): %f\n", time_end-time_start ); return (EXIT_SUCCESS); } void getRowsAndCols(const char *filename, int *rows, int *cols) { FILE *gridfile; // enough room to store each cell plus space, initialized with zeros char *row = calloc(2, sizeof(char)); int i = 0; if ((row) && (filename)) { // on process 0, open the file to read gridfile = fopen(filename, "r"); if (!gridfile) { fprintf(stderr, "Something went wrong when reading %s", filename); exit(EXIT_FAILURE); } // read in each row until rows *rows = 0; *cols = 0; while (fscanf(gridfile, "%s", row) != EOF) { i += 1; if ((getc(gridfile) == '\n')) { *rows += 1; } } } else { fprintf(stderr, "Something went wrong..."); exit(EXIT_FAILURE); } *cols = i / *rows; fclose(gridfile); free(row); } void initializeGrid(cell *myGrid, const char *filename) { FILE *gridfile; // enough room to store each cell plus space, initialized with zeros char *row = calloc(3, sizeof(char)); int i = 0; if ((row) && (myGrid) && (filename)) { // on process 0, open the file to read gridfile = fopen(filename, "r"); if (!gridfile) { fprintf(stderr, "Something went wrong when reading %s", filename); exit(EXIT_FAILURE); } while (fscanf(gridfile, "%s", row) != EOF) { myGrid[i++] = *row; } } else { fprintf(stderr, "Something went wrong..."); exit(EXIT_FAILURE); } fclose(gridfile); free(row); } void printGrid(cell *grid, const int rows, const int cols, const int step, FILE *output) { #pragma omp barrier { const int size = rows * cols; const int buffsize = 10; int i; fprintf(output, "%dx%d Grid for step %d\n", rows, cols, step); char *out = calloc(buffsize, sizeof(char)); for (i = 0; i < size; i++) { if ((i > 0) && (i % cols == 0)) { fprintf(output, "\n"); } if ((output == stdout) || (output == stderr)) { //strcat(out, grid[i] == DEAD ? KRED : KGRN); } strcat(out, (grid[i] == DEAD ? 
"x " : "o ")); if ((output == stdout) || (output == stderr)) { //strcat(out, RESET); } fprintf(output, out); memset(out, 0, buffsize); } fprintf(output, "\n\n"); free(out); } } int getNeighborCount(cell *lifeGrid, const int rows, const int cols, int x, int y) { int i, j; int count = 0; // go around the cell for (i=-1; i<=1; i++) { for (j=-1; j<=1; j++) { // no middle cell if ( i || j ) { // bound checks if ( x+i>=0 && x+i<=rows-1 && y+j>=0 && y+j<=cols-1 ) { //printf( "checking life_grid[%d][%d]\n", x+i, y+j ); if ( IS_ALIVE(lifeGrid[ (x+i)*cols+y+j ]) ) { count++; } } } } } //printf("count: %d\n", count ); return count; }
OMPSparseMatrix.c
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>

int main (int argc, char* argv[]) {
    //Declarations
    double dampingFactor = 0.15;
    int numPage = atoi(argv[1]);
    double *aArray, *pageRank, *yArray;
    int *iA, *jA;
    aArray = (double*)malloc((2*numPage-1)*sizeof(double));
    iA = (int*)malloc((numPage+1)*sizeof(int));
    jA = (int*)malloc((2*numPage-1)*sizeof(int));
    pageRank = (double*)malloc(numPage*sizeof(double));
    yArray = (double*)malloc(numPage*sizeof(double));
    int i, j, K, k;
    K = 1000;
    double startTime, endTime;

    //setup the aArray (nonzero values)
    for (i = 0; i < 2*numPage - 1; i++) {
        if (i == 2)
            aArray[i] = 1.0;
        else
            aArray[i] = 0.5;
    }

    //setup iA (row offsets) and initialize pagerank to 1/numPage
    for (i = 0; i < numPage + 1; i++) {
        if (i == numPage)
            iA[i] = 2*numPage - 1;
        else if (i == 0)
            iA[i] = 0;
        else
            iA[i] = iA[i-1] + 2;
        if (i < numPage)
            pageRank[i] = 1/(double)numPage;
    }

    //setup jA (column indices)
    jA[0] = 1;
    jA[1] = numPage - 1;
    jA[2] = 0;
    jA[3] = 2;
    for (i = 4; i < 2*numPage-1; i++) {
        jA[i] = jA[i-2] + 1;
    }

    //start timer and perform MatVec on the CSR K-times
    startTime = omp_get_wtime();
    for (k = 0; k < K; k++) {
        #pragma omp parallel for private(j)
        for (i = 0; i < numPage; i++) {
            yArray[i] = 0.0;
            for (j = iA[i]; j < iA[i+1]; j++) {
                yArray[i] += aArray[j] * pageRank[jA[j]];
            }
        }
        //apply damping serially once the parallel MatVec has finished
        //(the original "#pragma omp master" / "#pragma end master" pair
        //is not valid OpenMP here: outside a parallel region there is
        //only one thread anyway, and "end master" is not a directive)
        for (i = 0; i < numPage; i++) {
            yArray[i] = ((1 - dampingFactor)*yArray[i]) + (dampingFactor / numPage);
            pageRank[i] = yArray[i];
        }
    }
    endTime = omp_get_wtime();

    //print pagerank or max and min values
    if (numPage < 20) {
        for (i = 0; i < numPage; i++) {
            printf("PR(%d) = %f \n", i, pageRank[i]);
        }
    } else {
        double max, min;
        max = pageRank[0];
        min = pageRank[0];
        for (i = 0; i < numPage; i++) {
            if (max < pageRank[i])
                max = pageRank[i];
            if (min > pageRank[i])
                min = pageRank[i];
        }
        printf("Min Pagerank = %f \n", min);
        printf("Max Pagerank = %f \n", max);
    }

    //print runtime
    printf("RUNTIME = %.16f\n", endTime - startTime);

    free(aArray);
    free(iA);
    free(jA);
    free(pageRank);
    free(yArray);
    return 0;
}
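/*
 * Worked example (not in the original file): for numPage = 4 the setup
 * loops above produce this CSR triplet (2*numPage - 1 = 7 nonzeros):
 *
 *   iA     = { 0, 2, 4, 6, 7 }                      row start offsets
 *   jA     = { 1, 3, 0, 2, 1, 3, 2 }                column of each nonzero
 *   aArray = { 0.5, 0.5, 1.0, 0.5, 0.5, 0.5, 0.5 }  (entry 2, i.e. row 1 /
 *                                                    column 0, gets 1.0)
 * which encodes the dense link matrix
 *
 *   [ 0    0.5  0    0.5 ]
 *   [ 1.0  0    0.5  0   ]
 *   [ 0    0.5  0    0.5 ]
 *   [ 0    0    0.5  0   ]
 *
 * Row i of y = A * pageRank is then
 *
 *   y[i] = sum over j in [iA[i], iA[i+1]) of aArray[j] * pageRank[jA[j]]
 *
 * which is exactly the inner loop of the timed MatVec above.
 */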
custom_functions.h
// // Project Name: Kratos // Last Modified by: $Author: G.Casas (gcasas@cimmne.upc.edu) $ // Date: $Date: 2011-6-13 08:56:42 $ // Revision: $Revision: 1.5 $ // // //README::::look to the key word "VERSION" if you want to find all the points where you have to change something so that you can pass from a kdtree to a bin data search structure; #if !defined(KRATOS_CUSTOM_FUNCTIONS) #define KRATOS_CUSTOM_FUNCTIONS // /* External includes */ #ifdef _OPENMP #include <omp.h> #endif // System includes #include <vector> // Project includes #include "includes/model_part.h" #include "utilities/timer.h" #include "utilities/openmp_utils.h" #include "processes/find_elements_neighbours_process.h" #include "processes/find_nodal_neighbours_process.h" //Database includes #include "custom_utilities/search/discrete_particle_configure.h" #include "includes/define.h" #include "../../DEMApplication/custom_elements/discrete_element.h" #include "custom_elements/swimming_particle.h" #include "custom_utilities/AuxiliaryFunctions.h" #include "../../DEMApplication/custom_elements/spheric_particle.h" #include "../swimming_DEM_application.h" #include "../../../kratos/utilities/geometry_utilities.h" namespace Kratos { template <std::size_t TDim> class CustomFunctionsCalculator { public: typedef ModelPart::ElementsContainerType::iterator ElementIterator; typedef ModelPart::NodesContainerType::iterator NodeIterator; typedef ModelPart::NodesContainerType NodesArrayType; KRATOS_CLASS_POINTER_DEFINITION(CustomFunctionsCalculator); CustomFunctionsCalculator(): mPressuresFilled(false), mFirstGradientRecovery(true), mFirstLaplacianRecovery(true), mSomeCloudsDontWork(false), mCalculatingTheGradient(false), mCalculatingTheLaplacian(false), mFirstTimeAppending(true){} /// Calculator virtual ~CustomFunctionsCalculator(){} /// Default calculator //************************************************************************************************************************************************** //************************************************************************************************************************************************** void CalculatePressureGradient(ModelPart& r_model_part) { for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){ noalias(inode->FastGetSolutionStepValue(PRESSURE_GRADIENT)) = ZeroVector(3); } array_1d <double, 3> grad = ZeroVector(3); // its dimension is always 3 array_1d <double, TDim + 1 > elemental_pressures; array_1d <double, TDim + 1 > N; // shape functions vector BoundedMatrix<double, TDim + 1, TDim> DN_DX; for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){ // computing the shape function derivatives Geometry<Node<3> >& geom = ielem->GetGeometry(); double Volume; GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); // getting the pressure gradients; for (unsigned int i = 0; i < TDim + 1; ++i){ elemental_pressures[i] = geom[i].FastGetSolutionStepValue(PRESSURE); } array_1d <double, TDim> grad_aux = prod(trans(DN_DX), elemental_pressures); // its dimension may be 2 for (unsigned int i = 0; i < TDim; ++i){ grad[i] = grad_aux[i]; } double nodal_area = Volume / static_cast<double>(TDim + 1); grad *= nodal_area; for (unsigned int i = 0; i < TDim + 1; ++i){ geom[i].FastGetSolutionStepValue(PRESSURE_GRADIENT) += grad; } } for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){ inode->FastGetSolutionStepValue(PRESSURE_GRADIENT) /= 
inode->FastGetSolutionStepValue(NODAL_AREA);
        }
    }

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************

// This function assesses the stationarity based on the pressure field variation.
// Its tolerance applies to the non-dimensional pressure variation between consecutive
// measurements.
bool AssessStationarity(ModelPart& r_model_part, const double& tol)
{
    if (!mPressuresFilled){
        PerformFirstStepComputations(r_model_part);
        return(false);
    }

    else {
        double max_pressure_change_rate = 0.0; // measure of stationarity
        double mean_celerity = 0.0;            // used to non-dimensionalize the time step

        // filling up mPressures and calculating the mean velocities and the maximum nodal pressure change

        unsigned int i = 0;

        for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
            const array_1d<double, 3>& velocity = inode->FastGetSolutionStepValue(VELOCITY);
            mean_celerity += SWIMMING_MODULUS_3(velocity);

            const double new_pressure = inode->FastGetSolutionStepValue(PRESSURE);
            double& old_pressure = mPressures[i];
            const double delta_p = std::abs(new_pressure - old_pressure);
            max_pressure_change_rate = std::max(delta_p, max_pressure_change_rate);
            old_pressure = new_pressure;

            ++i;
        }

        mean_celerity /= i;

        const double delta_t = r_model_part.GetProcessInfo()[TIME] - mLastMeasurementTime;

        if (delta_t > 0.0){
            max_pressure_change_rate /= delta_t;

            // calculating coefficients for adimensionalization of the pressure change rate
            const double characteristic_length = std::pow(mTotalDomainVolume, 1.0 / 3); // characteristic length of the model. Should be improved: a hydraulic radius or such
            const double reciprocal_of_characteristic_time = mean_celerity / characteristic_length;
            const double pressure_spatial_variation = GetRangeWithinVector(mPressures);
            // average the current spatial variation with the previously stored one
            // *before* overwriting the stored value (the original overwrote it
            // first, which made the average degenerate to the current value)
            const double characteristic_pressure_variation = 0.5 * (pressure_spatial_variation + mLastPressureVariation);
            mLastPressureVariation = pressure_spatial_variation;

            if (std::abs(characteristic_pressure_variation) < std::numeric_limits<double>::epsilon() || std::abs(reciprocal_of_characteristic_time) < std::numeric_limits<double>::epsilon()){ // unlikely
                std::cout << "Uniform problem: stationarity check being performed with dimensional values...! " << "\n";

                if (max_pressure_change_rate <= tol){ // go with the absolute value
                    return true;
                }
            }

            max_pressure_change_rate /= reciprocal_of_characteristic_time * characteristic_pressure_variation ;
        }

        else {
            KRATOS_ERROR << "Trying to calculate pressure variations between two coincident time steps! 
(null time variation since last recorded time)" << std::endl; } std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n"; std::cout << "The stationarity condition tolerance is " << "\n"; KRATOS_INFO("SwimmingDEM") << tol << std::endl; std::cout << "The stationarity residual is now " << "\n"; KRATOS_INFO("SwimmingDEM") << max_pressure_change_rate << std::endl; std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n"; return max_pressure_change_rate <= tol; } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** double CalculateDomainVolume(ModelPart& r_fluid_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double added_volume = 0.0; #pragma omp parallel for reduction(+ : added_volume) for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){ for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){ added_volume += CalculateElementalVolume(it->GetGeometry()); } } return added_volume; } //************************************************************************************************************************************************** //************************************************************************************************************************************************** // this function assumes linear elements are used void CalculateTotalHydrodynamicForceOnParticles(ModelPart& r_dem_model_part, array_1d <double, 3>& force) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_dem_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); std::vector<array_1d <double, 3> > added_force_vect; added_force_vect.resize(ParallelUtilities::GetNumThreads()); for (unsigned int k = 0; k < added_force_vect.size(); ++k){ added_force_vect[k] = ZeroVector(3); } #pragma omp parallel for for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){ for (ElementIterator it = GetElementPartitionBegin(r_dem_model_part, k); it != GetElementPartitionEnd(r_dem_model_part, k); ++it){ Geometry< Node<3> >& geom = it->GetGeometry(); array_1d <double, 3> element_force; if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_FORCE)){ element_force = geom[0].FastGetSolutionStepValue(HYDRODYNAMIC_FORCE); } else { element_force = ZeroVector(3); } added_force_vect[k] += element_force; } } force = added_force_vect[0]; for (unsigned int k = 1; k < added_force_vect.size(); ++k){ force += added_force_vect[k]; } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** // this function assumes linear elements are used void CalculateTotalHydrodynamicForceOnFluid(ModelPart& r_fluid_model_part, array_1d <double, 3>& instantaneous_force, array_1d <double, 3>& mean_force) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); std::vector<array_1d <double, 3> > 
    added_force_vect.resize(ParallelUtilities::GetNumThreads());
    std::vector<array_1d <double, 3> > added_mean_force_vect;
    added_mean_force_vect.resize(ParallelUtilities::GetNumThreads());

    for (unsigned int k = 0; k < added_force_vect.size(); ++k){
        added_force_vect[k] = ZeroVector(3);
        added_mean_force_vect[k] = ZeroVector(3);
    }

    #pragma omp parallel for
    for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            double element_volume;
            array_1d <double, 3> element_force;
            array_1d <double, 3> element_mean_force;

            if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, HYDRODYNAMIC_REACTION, element_volume);
            }

            else {
                element_force = ZeroVector(3);
            }

            if (geom[0].SolutionStepsDataHas(MEAN_HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_mean_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, MEAN_HYDRODYNAMIC_REACTION, element_volume);
            }

            else {
                element_mean_force = ZeroVector(3);
            }

            added_force_vect[k] += element_force;
            added_mean_force_vect[k] += element_mean_force;
        }
    }

    instantaneous_force = added_force_vect[0];
    mean_force = added_mean_force_vect[0]; // seed the mean force with its own partial sums

    for (unsigned int k = 1; k < added_force_vect.size(); ++k){
        instantaneous_force += added_force_vect[k];
        mean_force += added_mean_force_vect[k];
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
double CalculateGlobalFluidVolume(ModelPart& r_fluid_model_part)
{
    OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    double added_fluid_volume = 0.0;

    #pragma omp parallel for reduction(+ : added_fluid_volume)
    for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            double element_volume;
            double element_fluid_volume;

            if (geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_fluid_volume = CalculateScalarIntegralOfLinearInterpolation(geom, FLUID_FRACTION, element_volume);
            }

            else {
                element_fluid_volume = CalculateElementalVolume(geom);
            }

            added_fluid_volume += element_fluid_volume;
        }
    }

    return added_fluid_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
template<class matrix_T>
double determinant(boost::numeric::ublas::matrix_expression<matrix_T> const& mat_r)
{
    double det = 1.0;

    matrix_T mLu(mat_r());
    boost::numeric::ublas::permutation_matrix<std::size_t> pivots(mat_r().size1());

    int is_singular = lu_factorize(mLu, pivots);

    if (!is_singular) {
        for (std::size_t i = 0; i < pivots.size(); ++i) {
            if (pivots(i) != i) det *= -1.0;
            det *= mLu(i, i);
        }
    }

    else
det = 0.0; return det; } //************************************************************************************************************************************************** //************************************************************************************************************************************************** const DenseMatrix<double> Inverse( const DenseMatrix<double>& m) { assert(m.size1() == m.size2() && "Can only calculate the inverse of square matrices"); switch(m.size1()) { case 1: { assert(m.size1() == 1 && m.size2() == 1 && "Only for 1x1 matrices"); const double determinant = CalcDeterminant(m); assert(determinant != 0.0); assert(m(0,0) != 0.0 && "Cannot take the inverse of matrix [0]"); DenseMatrix<double> n(1,1); n(0,0) = 1.0 / determinant; return n; } case 2: { assert(m.size1() == 2 && m.size2() == 2 && "Only for 2x2 matrices"); const double determinant = CalcDeterminant(m); assert(determinant != 0.0); const double a = m(0,0); const double b = m(0,1); const double c = m(1,0); const double d = m(1,1); DenseMatrix<double> n(2,2); n(0,0) = d / determinant; n(0,1) = -b / determinant; n(1,0) = -c / determinant; n(1,1) = a / determinant; return n; } case 3: { assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices"); const double determinant = CalcDeterminant(m); assert(determinant != 0.0); const double a = m(0,0); const double b = m(0,1); const double c = m(0,2); const double d = m(1,0); const double e = m(1,1); const double f = m(1,2); const double g = m(2,0); const double h = m(2,1); const double k = m(2,2); DenseMatrix<double> n(3,3); const double new_a = ((e*k)-(f*h)) / determinant; const double new_b = -((d*k)-(f*g)) / determinant; const double new_c = ((d*h)-(e*g)) / determinant; const double new_d = -((b*k)-(c*h)) / determinant; const double new_e = ((a*k)-(c*g)) / determinant; const double new_f = -((a*h)-(b*g)) / determinant; const double new_g = ((b*f)-(c*e)) / determinant; const double new_h = -((a*f)-(c*d)) / determinant; const double new_k = ((a*e)-(b*d)) / determinant; n(0,0) = new_a; n(1,0) = new_b; n(2,0) = new_c; n(0,1) = new_d; n(1,1) = new_e; n(2,1) = new_f; n(0,2) = new_g; n(1,2) = new_h; n(2,2) = new_k; return n; } default: { //Use blockwise inversion //Matrix::Chop returns a std::vector //[ A at [0] B at [1] ] //[ C at [2] D at [4] ] const std::vector<DenseMatrix<double> > v = Chop(m); const DenseMatrix<double>& a = v[0]; assert(a.size1() == a.size2()); const DenseMatrix<double> a_inv = Inverse(a); const DenseMatrix<double>& b = v[1]; const DenseMatrix<double>& c = v[2]; const DenseMatrix<double>& d = v[3]; const DenseMatrix<double> term = d - prod( DenseMatrix<double>(prod(c,a_inv)), b ); const DenseMatrix<double> term_inv = Inverse(term); const DenseMatrix<double> new_a = a_inv + DenseMatrix<double>(prod( DenseMatrix<double>(prod( DenseMatrix<double>(prod( DenseMatrix<double>(prod( a_inv, b)), term_inv)), c)), a_inv)); const DenseMatrix<double> new_b = - DenseMatrix<double>(prod( DenseMatrix<double>(prod( a_inv, b)), term_inv)); const DenseMatrix<double> new_c = - DenseMatrix<double>(prod( DenseMatrix<double>(prod( term_inv, c)), a_inv)); const DenseMatrix<double> new_d = term_inv; std::vector<DenseMatrix<double> > w; w.push_back(new_a); w.push_back(new_b); w.push_back(new_c); w.push_back(new_d); const DenseMatrix<double> result = Unchop(w); return result; } } } //************************************************************************************************************************************************** 
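//**************************************************************************************************************************************************
// A minimal self-check sketch: the blockwise branch of Inverse above relies on the Schur complement
// S = D - C * A^{-1} * B of M = [A, B; C, D], from which M^{-1} is assembled. The helper below is a
// hypothetical addition (name, tolerance and the idea of a self-test are illustrative assumptions,
// not part of the original utility set); it validates any branch by checking M * Inverse(M) ~ I.
bool SketchCheckInverse(const DenseMatrix<double>& m, const double tolerance = 1e-10)
{
    const DenseMatrix<double> m_inv = Inverse(m);
    const DenseMatrix<double> should_be_identity = prod(m, m_inv);

    for (std::size_t i = 0; i < m.size1(); ++i){
        for (std::size_t j = 0; j < m.size2(); ++j){
            const double expected = (i == j ? 1.0 : 0.0);

            if (std::abs(should_be_identity(i, j) - expected) > tolerance){
                return false;
            }
        }
    }

    return true;
}
//**************************************************************************************************************************************************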
//**************************************************************************************************************************************************
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<double>& origin_variable, const Variable<double>& destination_variable)
{
    #pragma omp parallel for
    for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
        ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(i_particle.base());
        double& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
        const double& origin_value = p_node->FastGetSolutionStepValue(origin_variable);
        destination_value = origin_value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<array_1d<double, 3>>& origin_variable, const Variable<array_1d<double, 3>>& destination_variable)
{
    #pragma omp parallel for
    for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
        ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(i_particle.base());
        array_1d<double, 3>& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
        const array_1d<double, 3>& origin_value = p_node->FastGetSolutionStepValue(origin_variable);
        noalias(destination_value) = origin_value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void SetValueOfAllNodes(ModelPart& r_model_part, const double& value, const Variable<double>& destination_variable)
{
    #pragma omp parallel for
    for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
        ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(i_particle.base());
        double& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
        destination_value = value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void SetValueOfAllNodes(ModelPart& r_model_part, const array_1d<double, 3>& value, const Variable<array_1d<double, 3>>& destination_variable)
{
    #pragma omp parallel for
    for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){
        ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i;
        Node<3>::Pointer p_node = *(i_particle.base());
        array_1d<double, 3>& destination_value = p_node->FastGetSolutionStepValue(destination_variable);
        noalias(destination_value) = value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************

private:

bool mPressuresFilled;
bool mFirstGradientRecovery;
bool
mFirstLaplacianRecovery; bool mSomeCloudsDontWork; bool mCalculatingTheGradient; bool mCalculatingTheLaplacian; bool mFirstTimeAppending; double mLastMeasurementTime; double mLastPressureVariation; double mTotalDomainVolume; std::vector<double> mPressures; std::vector<DenseVector<double> > mFirstRowsOfB; //************************************************************************************************************************************************** //************************************************************************************************************************************************** inline double CalculateArea(const double x0, const double y0, const double x1, const double y1, const double x2, const double y2) { const double x10 = x1 - x0; const double y10 = y1 - y0; const double x20 = x2 - x0; const double y20 = y2 - y0; const double area = 0.5 * std::abs(x10 * y20 - x20 * y10); return area; } //************************************************************************************************************************************************** //************************************************************************************************************************************************** inline double CalculateVol(const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666666667; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateElementalVolume(const Geometry<Node <3> >& geom) { double vol; if (TDim == 2){ double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); vol = CalculateArea(x0, y0, x1, y1, x2, y2); } else { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); } if (std::abs(vol) < std::numeric_limits<double>::epsilon()){ KRATOS_ERROR << "Element with zero area found with the current geometry "<< geom << std::endl; } return vol; } //************************************************************************************************************************************************** //************************************************************************************************************************************************** double CalculateScalarIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<double>& r_var, double& vol) { array_1d<double, 4> N; double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = 
geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    KRATOS_ERROR_IF(std::abs(vol) < std::numeric_limits<double>::epsilon()) << "Element with zero area found. Its geometry is given by " << geom << std::endl;

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    double value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var);
    }

    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolation(const Geometry<Node<3> >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    array_1d<double, 4> N;
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    KRATOS_ERROR_IF(std::abs(vol) < std::numeric_limits<double>::epsilon()) << "Element with zero area found. Its geometry is given by " << geom << std::endl;

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var);
    }

    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(const Geometry<Node<3> >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    array_1d<double, 4> N;
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    KRATOS_ERROR_IF(std::abs(vol) < std::numeric_limits<double>::epsilon()) << "Element with zero area found. Its geometry is given by " << geom << std::endl;

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var) * geom[0].FastGetSolutionStepValue(DENSITY) * geom[0].FastGetSolutionStepValue(FLUID_FRACTION);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var) * geom[i].FastGetSolutionStepValue(DENSITY) * geom[i].FastGetSolutionStepValue(FLUID_FRACTION);
    }

    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
void PerformFirstStepComputations(ModelPart& r_model_part)
{
    mTotalDomainVolume = CalculateDomainVolume(r_model_part);
    mPressures.resize(r_model_part.Nodes().size());
    mLastMeasurementTime = r_model_part.GetProcessInfo()[TIME];

    unsigned int i = 0;

    for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode) {
        mPressures[i] = inode->FastGetSolutionStepValue(PRESSURE);
        ++i;
    }

    mPressuresFilled = true;
    mLastPressureVariation = GetRangeWithinVector(mPressures);
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************

struct IsCloser{
    bool operator()(std::pair<unsigned int, double> const& first_pair, std::pair<unsigned int, double> const& second_pair)
    {
        return(first_pair.second < second_pair.second || (first_pair.second == second_pair.second && first_pair.first < second_pair.first));
    }
};
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
inline int Factorial(const unsigned int n){

    if (n == 0){
        return 1;
    }

    unsigned int k = n;

    for (unsigned int i = n - 1; i > 0; --i){
        k *= i;
    }

    return k;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
double CalculateTheMaximumEdgeLength(ModelPart& r_model_part)
{
    double max_distance_yet = 0.0;

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        Geometry<Node<3> >& geom = ielem->GetGeometry();
        unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);

        // visit every distinct node pair (k - 1, i); k must reach n_nodes for the last pair to be included
        for (unsigned int k = 1; k < n_nodes; ++k){
            for (unsigned int i = k; i < n_nodes; ++i){
                array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
                double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
                max_distance_yet = max_distance_yet > distance_2 ? max_distance_yet : distance_2;
            }
        }
    }

    return(std::sqrt(max_distance_yet));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
double CalculateTheMinimumEdgeLength(ModelPart& r_model_part)
{
    double min_distance_yet = 0.0;
    bool first_node = true;

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        Geometry<Node<3> >& geom = ielem->GetGeometry();

        if (first_node){ // assign the distance (squared) between any two nodes to min_distance_yet
            array_1d <double, 3> delta = geom[0] - geom[1];
            double distance_2 = DEM_INNER_PRODUCT_3(delta, delta);
            min_distance_yet = distance_2;
            first_node = false; // seed only once, so subsequent elements cannot reset the running minimum
        }

        unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);

        // visit every distinct node pair (k - 1, i); k must reach n_nodes for the last pair to be included
        for (unsigned int k = 1; k < n_nodes; ++k){
            for (unsigned int i = k; i < n_nodes; ++i){
                array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
                double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
                min_distance_yet = min_distance_yet < distance_2 ?
min_distance_yet : distance_2; } } } return(std::sqrt(min_distance_yet)); } //************************************************************************************************************************************************** //************************************************************************************************************************************************** // The following block of functions is used to calculate explicit matrix inverses and was taken from // Richel BilderBeek's website (http://www.richelbilderbeek.nl/CppUblasMatrixExample6.htm), and it is // transcribed here with a very minor modification double CalcDeterminant(const DenseMatrix<double>& m) { assert(m.size1() == m.size2() && "Can only calculate the determinant of square matrices"); switch(m.size1()) { case 1: { return m(0,0); } case 2: { const double a = m(0,0); const double b = m(0,1); const double c = m(1,0); const double d = m(1,1); const double determinant = (a * d) - (b * c); return determinant; } case 3: { assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices"); const double a = m(0,0); const double b = m(0,1); const double c = m(0,2); const double d = m(1,0); const double e = m(1,1); const double f = m(1,2); const double g = m(2,0); const double h = m(2,1); const double k = m(2,2); const double determinant = (a * ((e*k) - (f*h))) - (b * ((k*d) - (f*g))) + (c * ((d*h) - (e*g))); return determinant; } default: assert(!"Should not get here: unsupported matrix size"); throw std::runtime_error("Unsupported matrix size"); } } ///Chop returns a std::vector of sub-matrices //[ A at [0] B at [1] ] //[ C at [2] D at [4] ] const std::vector<DenseMatrix<double> > Chop( const DenseMatrix<double>& m) { using boost::numeric::ublas::range; using boost::numeric::ublas::matrix_range; std::vector<matrix<double> > v; v.reserve(4); const int midy = m.size1() / 2; const int midx = m.size2() / 2; const matrix_range<const matrix<double> > top_left( m,range(0 ,midy ),range(0 ,midx )); const matrix_range<const matrix<double> > bottom_left( m,range(midy,m.size1()),range(0 ,midx )); const matrix_range<const matrix<double> > top_right( m,range(0 ,midy ),range(midx,m.size2())); const matrix_range<const matrix<double> > bottom_right(m,range(midy,m.size1()),range(midx,m.size2())); v.push_back(matrix<double>(top_left)); v.push_back(matrix<double>(top_right)); v.push_back(matrix<double>(bottom_left)); v.push_back(matrix<double>(bottom_right)); return v; } ///Unchop merges the 4 std::vector of sub-matrices produced by Chop const DenseMatrix<double> Unchop( const std::vector<DenseMatrix<double> >& v) { //Chop returns a std::vector of sub-matrices //[ A at [0] B at [1] ] //[ C at [2] D at [4] ] using boost::numeric::ublas::range; using boost::numeric::ublas::matrix_range; assert(v.size() == 4); assert(v[0].size1() == v[1].size1()); assert(v[2].size1() == v[3].size1()); assert(v[0].size2() == v[2].size2()); assert(v[1].size2() == v[3].size2()); DenseMatrix<double> m(v[0].size1() + v[2].size1(),v[0].size2() + v[1].size2()); for (int quadrant=0; quadrant!=4; ++quadrant) { const DenseMatrix<double>& w = v[quadrant]; const std::size_t n_rows = v[quadrant].size1(); const std::size_t n_cols = v[quadrant].size2(); const int offset_x = quadrant % 2 ? v[0].size2() : 0; const int offset_y = quadrant / 2 ? 
v[0].size1() : 0;

        for (std::size_t row = 0; row != n_rows; ++row) {
            for (std::size_t col = 0; col != n_cols; ++col) {
                m(offset_y + row, offset_x + col) = w(row, col);
            }
        }
    }

    assert(v[0].size1() + v[2].size1() == m.size1());
    assert(v[1].size1() + v[3].size1() == m.size1());
    assert(v[0].size2() + v[1].size2() == m.size2());
    assert(v[2].size2() + v[3].size2() == m.size2());

    return m;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************

///@}
///@name Member variables
///@{
DenseVector<unsigned int> mElementsPartition;

///@}
///@name Un accessible methods
///@{

double GetRangeWithinVector(const std::vector<double>& vector)
{
    double min = vector[0];
    double max = vector[0];

    for (unsigned int i = 1; i != vector.size(); ++i){
        min = std::min(min, vector[i]);
        max = std::max(max, vector[i]);
    }

    return (max - min);
}

DenseVector<unsigned int>& GetElementPartition()
{
    return mElementsPartition;
}

ElementIterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k)
{
    return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k];
}

ElementIterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k)
{
    return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k + 1];
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
}; // Class CustomFunctionsCalculator
} // namespace Kratos.
#endif // KRATOS_CREATE_AND_DESTROY defined
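// A minimal standalone sketch of the nondimensionalization performed in AssessStationarity above,
// reduced to plain doubles (function and parameter names here are illustrative assumptions, not
// part of the Kratos API): the residual is the maximum nodal pressure change rate divided by the
// reciprocal of a characteristic time (mean celerity over domain length) and by a characteristic
// pressure variation. A flow is deemed stationary when this residual drops below the tolerance.
#include <cmath>

inline double StationarityResidualSketch(const double max_pressure_change, // largest nodal |p_new - p_old| [Pa]
                                         const double delta_t,             // time since the last measurement [s]
                                         const double mean_celerity,       // mean fluid speed [m/s]
                                         const double domain_volume,       // total domain volume [m^3]
                                         const double pressure_range)      // characteristic spatial pressure variation [Pa]
{
    const double rate = max_pressure_change / delta_t;         // [Pa/s]
    const double length = std::pow(domain_volume, 1.0 / 3.0);  // characteristic length [m]
    const double reciprocal_time = mean_celerity / length;     // [1/s]
    return rate / (reciprocal_time * pressure_range);          // nondimensional residual, compared against tol
}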
restriction.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ static inline void RestrictBlock(level_type *level_c, int id_c, level_type *level_f, int id_f, blockCopy_type *block, int restrictionType){ // restrict 3D array from read_i,j,k of read[] to write_i,j,k in write[] int dim_i = block->dim.i; // calculate the dimensions of the resultant coarse block int dim_j = block->dim.j; int dim_k = block->dim.k; int read_i = block->read.i; int read_j = block->read.j; int read_k = block->read.k; int read_jStride = block->read.jStride; int read_kStride = block->read.kStride; int write_i = block->write.i; int write_j = block->write.j; int write_k = block->write.k; int write_jStride = block->write.jStride; int write_kStride = block->write.kStride; double * __restrict__ read = block->read.ptr; double * __restrict__ write = block->write.ptr; if(block->read.box >=0){ read = level_f->my_boxes[ block->read.box].vectors[id_f] + level_f->my_boxes[ block->read.box].ghosts*(1+level_f->my_boxes[ block->read.box].jStride+level_f->my_boxes[ block->read.box].kStride); read_jStride = level_f->my_boxes[block->read.box ].jStride; read_kStride = level_f->my_boxes[block->read.box ].kStride; } if(block->write.box>=0){ write = level_c->my_boxes[block->write.box].vectors[id_c] + level_c->my_boxes[block->write.box].ghosts*(1+level_c->my_boxes[block->write.box].jStride+level_c->my_boxes[block->write.box].kStride); write_jStride = level_c->my_boxes[block->write.box].jStride; write_kStride = level_c->my_boxes[block->write.box].kStride; } int i,j,k; switch(restrictionType){ case RESTRICT_CELL: for(k=0;k<dim_k;k++){ for(j=0;j<dim_j;j++){ for(i=0;i<dim_i;i++){ int write_ijk = ((i )+write_i) + ((j )+write_j)*write_jStride + ((k )+write_k)*write_kStride; int read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride; write[write_ijk] = ( read[read_ijk ]+read[read_ijk+1 ] + read[read_ijk +read_jStride ]+read[read_ijk+1+read_jStride ] + read[read_ijk +read_kStride]+read[read_ijk+1 +read_kStride] + read[read_ijk +read_jStride+read_kStride]+read[read_ijk+1+read_jStride+read_kStride] ) * 0.125; }}}break; case RESTRICT_FACE_I: for(k=0;k<dim_k;k++){ for(j=0;j<dim_j;j++){ for(i=0;i<dim_i;i++){ int write_ijk = ((i )+write_i) + ((j )+write_j)*write_jStride + ((k )+write_k)*write_kStride; int read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride; write[write_ijk] = ( read[read_ijk ] + read[read_ijk+read_jStride ] + read[read_ijk +read_kStride] + read[read_ijk+read_jStride+read_kStride] ) * 0.25; }}}break; case RESTRICT_FACE_J: for(k=0;k<dim_k;k++){ for(j=0;j<dim_j;j++){ for(i=0;i<dim_i;i++){ int write_ijk = ((i )+write_i) + ((j )+write_j)*write_jStride + ((k )+write_k)*write_kStride; int read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride; write[write_ijk] = ( read[read_ijk ] + read[read_ijk+1 ] + read[read_ijk +read_kStride] + read[read_ijk+1+read_kStride] ) * 0.25; }}}break; case RESTRICT_FACE_K: for(k=0;k<dim_k;k++){ for(j=0;j<dim_j;j++){ for(i=0;i<dim_i;i++){ int write_ijk = ((i )+write_i) + ((j )+write_j)*write_jStride + ((k )+write_k)*write_kStride; int read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride; 
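        // face-K restriction: average the 2x2 patch of fine-grid values spanning (i, i+1) x (j, j+1) that overlies this coarse K-face point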
        write[write_ijk] = ( read[read_ijk             ] + read[read_ijk+1             ] +
                             read[read_ijk+read_jStride] + read[read_ijk+1+read_jStride] ) * 0.25;
      }}}break;
  }
}

//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) restriction
void restriction(level_type * level_c, int id_c, level_type *level_f, int id_f, int restrictionType){
  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  int buffer=0;
  int n;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f sends and level_c recvs...
  int nMessages = level_c->restriction[restrictionType].num_recvs + level_f->restriction[restrictionType].num_sends;
  MPI_Request *recv_requests = level_f->restriction[restrictionType].requests;
  MPI_Request *send_requests = level_f->restriction[restrictionType].requests + level_c->restriction[restrictionType].num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  _timeStart = CycleTime();
  // #pragma omp parallel for schedule(dynamic,1)
  // capture the surrounding locals by reference; finish() blocks until all forasync tasks complete
  hclib::finish([&] {
      hclib::loop_domain_1d loop(level_c->restriction[restrictionType].num_recvs);
      hclib::forasync(&loop, [&] (int n) {
          hclib::MPI_Irecv(level_c->restriction[restrictionType].recv_buffers[n],
                    level_c->restriction[restrictionType].recv_sizes[n],
                    MPI_DOUBLE,
                    level_c->restriction[restrictionType].recv_ranks[n],
                    5, // by convention, restriction uses tag=5
                    MPI_COMM_WORLD,
                    &recv_requests[n]
          );
      });
  });
  _timeEnd = CycleTime();
  level_f->cycles.restriction_recv += (_timeEnd-_timeStart);

  // pack MPI send buffers...
  _timeStart = CycleTime();
  // #pragma omp parallel for private(buffer) if(level_f->restriction[restrictionType].num_blocks[0]>1) schedule(static,1)
  hclib::finish([&] {
      hclib::loop_domain_1d loop(level_f->restriction[restrictionType].num_blocks[0]);
      hclib::forasync(&loop, [&] (int buffer) {
          RestrictBlock(level_c,id_c,level_f,id_f, &level_f->restriction[restrictionType].blocks[0][buffer],restrictionType);
      });
  });
  _timeEnd = CycleTime();
  level_f->cycles.restriction_pack += (_timeEnd-_timeStart);

  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  // #pragma omp parallel for schedule(dynamic,1)
  hclib::finish([&] {
      hclib::loop_domain_1d loop(level_f->restriction[restrictionType].num_sends);
      hclib::forasync(&loop, [&] (int n) {
          hclib::MPI_Isend(level_f->restriction[restrictionType].send_buffers[n],
                    level_f->restriction[restrictionType].send_sizes[n],
                    MPI_DOUBLE,
                    level_f->restriction[restrictionType].send_ranks[n],
                    5, // by convention, restriction uses tag=5
                    MPI_COMM_WORLD,
                    &send_requests[n]
          );
      });
  });
  _timeEnd = CycleTime();
  level_f->cycles.restriction_send += (_timeEnd-_timeStart);
  #endif

  // perform local restriction[restrictionType]... try and hide within Isend latency...
  _timeStart = CycleTime();
  // #pragma omp parallel for private(buffer) if(level_f->restriction[restrictionType].num_blocks[1]>1) schedule(static,1)
  hclib::finish([&] {
      hclib::loop_domain_1d loop(level_f->restriction[restrictionType].num_blocks[1]);
      hclib::forasync(&loop, [&] (int buffer) {
          RestrictBlock(level_c,id_c,level_f,id_f, &level_f->restriction[restrictionType].blocks[1][buffer],restrictionType);
      });
  });
  _timeEnd = CycleTime();
  level_f->cycles.restriction_local += (_timeEnd-_timeStart);

  // wait for MPI to finish...
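  // (the Waitall below covers both the coarse-level Irecvs and the fine-level Isends, which share one requests array)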
  #ifdef USE_MPI
  _timeStart = CycleTime();
  if(nMessages)hclib::MPI_Waitall(nMessages,level_f->restriction[restrictionType].requests,level_f->restriction[restrictionType].status);
  _timeEnd = CycleTime();
  level_f->cycles.restriction_wait += (_timeEnd-_timeStart);

  // unpack MPI receive buffers
  _timeStart = CycleTime();
  // #pragma omp parallel for private(buffer) if(level_c->restriction[restrictionType].num_blocks[2]>1) schedule(static,1)
  hclib::finish([&] {
      hclib::loop_domain_1d loop(level_c->restriction[restrictionType].num_blocks[2]);
      hclib::forasync(&loop, [&] (int buffer) {
          CopyBlock(level_c,id_c,&level_c->restriction[restrictionType].blocks[2][buffer]);
      });
  });
  _timeEnd = CycleTime();
  level_f->cycles.restriction_unpack += (_timeEnd-_timeStart);
  #endif

  level_f->cycles.restriction_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
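// A minimal standalone sketch (illustrative, outside the level/box data structures above) of the
// RESTRICT_CELL stencil: each coarse cell receives the average of the 2x2x2 fine cells it covers.
static inline void restrict_cells_sketch(const double * fine, double * coarse, int cdim_i, int cdim_j, int cdim_k){
  int fjStride = 2*cdim_i;          // fine grid is (2*cdim_i) x (2*cdim_j) x (2*cdim_k), contiguous in i
  int fkStride = 2*cdim_i*2*cdim_j;
  int i,j,k;
  for(k=0;k<cdim_k;k++){
  for(j=0;j<cdim_j;j++){
  for(i=0;i<cdim_i;i++){
    int f = (i<<1) + (j<<1)*fjStride + (k<<1)*fkStride;  // first of the eight fine cells under coarse (i,j,k)
    coarse[i + j*cdim_i + k*cdim_i*cdim_j] = 0.125*(
      fine[f                  ] + fine[f+1                  ] +
      fine[f+fjStride         ] + fine[f+1+fjStride         ] +
      fine[f+fkStride         ] + fine[f+1+fkStride         ] +
      fine[f+fjStride+fkStride] + fine[f+1+fjStride+fkStride] );
  }}}
}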
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class 
DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. 
SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token; all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// A key method to reduce duplicate debug info from Sema.
  virtual void anchor();

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    if (isVisible(Old))
      return true;
    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
    if (New->isExternallyDeclarable()) {
      assert(Old->isExternallyDeclarable() &&
             "should not have found a non-externally-declarable previous decl");
      return true;
    }
    return false;
  }
  bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);

  void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
                                      QualType ResultTy,
                                      ArrayRef<QualType> Args);

public:
  /// The maximum alignment, same as in llvm::Value. We duplicate them here
  /// because that allows us not to duplicate the constants in clang code,
  /// which we must do since we can't directly use the llvm constants.
  /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
  ///
  /// This is the greatest alignment value supported by load, store, and alloca
  /// instructions, and global values.
  static const unsigned MaxAlignmentExponent = 29;
  static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;

  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;
  typedef OpaquePtr<QualType> TypeTy;

  OpenCLOptions OpenCLFeatures;
  FPOptions FPFeatures;

  const LangOptions &LangOpts;
  Preprocessor &PP;
  ASTContext &Context;
  ASTConsumer &Consumer;
  DiagnosticsEngine &Diags;
  SourceManager &SourceMgr;
  api_notes::APINotesManager APINotes;

  /// Flag indicating whether or not to collect detailed statistics.
  bool CollectStats;

  /// Code-completion consumer.
  CodeCompleteConsumer *CodeCompleter;

  /// CurContext - This is the current declaration context of parsing.
  DeclContext *CurContext;

  /// Generally null except when we temporarily switch decl contexts,
  /// like in \see ActOnObjCTemporaryExitContainerContext.
  DeclContext *OriginalLexicalContext;

  /// VAListTagName - The declaration name corresponding to __va_list_tag.
  /// This is used as part of a hack to omit that class from ADL results.
  DeclarationName VAListTagName;

  bool MSStructPragmaOn; // True when \#pragma ms_struct on

  /// Controls member pointer representation format under the MS ABI.
  LangOptions::PragmaMSPointersToMembersKind
      MSPointerToMemberRepresentationMethod;

  /// Stack of active SEH __finally scopes.  Can be empty.
  SmallVector<Scope*, 2> CurrentSEHFinally;

  /// Source location for newly created implicit MSInheritanceAttrs
  SourceLocation ImplicitMSInheritanceAttrLoc;

  /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
  /// `TransformTypos` in order to keep track of any TypoExprs that are created
  /// recursively during typo correction and wipe them away if the correction
  /// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. 
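  // These stacks track the section names established by the MSVC-style #pragma data_seg, bss_seg,
  // const_seg and code_seg directives, which place subsequent definitions into named object-file sections.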
PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. 
  /// This information is also present in LambdaScopeInfo, stored in
  /// the FunctionScopes stack.
  SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;

  /// The index of the first InventedParameterInfo that refers to the current
  /// context.
  unsigned InventedParameterInfosStart = 0;

  ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
    return llvm::makeArrayRef(InventedParameterInfos.begin() +
                                  InventedParameterInfosStart,
                              InventedParameterInfos.end());
  }

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list all the extended vector types. This allows
  /// us to associate a raw vector type with one of the ext_vector type names.
  /// This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of the translation unit
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with new-expression
  /// used in initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  /// All the external declarations encountered and used in the TU.
  SmallVector<VarDecl *, 4> ExternalDeclarations;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
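/// Illustrative usage sketch (not from the original header; 'SemaRef' and
/// 'RD' stand for a hypothetical Sema instance and CXXRecordDecl*):
/// \code
///   {
///     Sema::ContextRAII SavedCtx(SemaRef, RD); // CurContext becomes RD
///     // ... build or look up declarations inside RD ...
///   } // destructor restores the previous context and scope indices
/// \endcode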
class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. 
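/// (Illustrative, not from the original header: the \#pragma weak pattern
/// that this table, and the loader below, exist to support.)
/// \code
///   #pragma weak pending    // 'pending' is undeclared: recorded in
///   void pending(void);     // WeakUndeclaredIdentifiers until this is seen
/// \endcode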
void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to look up file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits>. ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// Will hold 'respondsToSelector:'. Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all.
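/// A few illustrative expressions (not from the original header) and the
/// contexts, described below, that they occupy:
/// \code
///   int a = sizeof(f());        // f() is Unevaluated: no code is generated
///                               // and f need not be defined
///   switch (n) { case C: ; }    // C is ConstantEvaluated
///   int b = f();                // f() is PotentiallyEvaluated: f is odr-used
/// \endcode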
enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. 
We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed.
Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. 
Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPFeaturesStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// /// \param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed.) Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics.
SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, and the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken(). SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); // Emit any deferred diagnostics for FD and erase them from the map in which // they're stored. void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration.
Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
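/// (Illustrative sketch, not from the original header, of composing a type
/// with the Build* helpers above; 'S' stands for a Sema instance and 'Loc'
/// for a hypothetical source location.)
/// \code
///   QualType IntPtr = S.BuildPointerType(S.Context.IntTy, Loc,
///                                        DeclarationName()); // yields 'int *'
/// \endcode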
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal swift_name /// attribute for the decl \p D. Raise a diagnostic if the name is invalid /// for the given declaration. /// /// For a function, this will validate a compound Swift name, /// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, /// and the function will output the number of parameter names, and whether /// this is a single-arg initializer. /// /// For a type, enum constant, property, or variable declaration, this will /// validate either a simple identifier, or a qualified /// <code>context.identifier</code> name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc, const IdentifierInfo *AttrName); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? 
nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... 
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. 
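/// For instance (illustrative, not from the original header), MSVC accepts:
/// \code
///   template <typename T> struct S : T {
///     Inner *p; // 'Inner' not found here; recovered as a dependent name
///               // and looked up again when S<T> is instantiated
///   };
/// \endcode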
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as a non-type, and an expression representing /// that name has been formed. NC_ContextIndependentExpr, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification ContextIndependentExpr(ExprResult E) { NameClassification Result(NC_ContextIndependentExpr); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_ContextIndependentExpr); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); 
return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
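/// (Illustrative, not from the original header.) The typical case is a
/// by-copy lambda capture that shadows an enclosing variable:
/// \code
///   int count = 0;
///   auto inc = [count]() mutable {
///     ++count; // modifies the capture, not the enclosing 'count'
///   };
/// \endcode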
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration. bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
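///
/// A minimal sketch (assuming ARC): a union with an Objective-C pointer
/// member is non-trivial to default-initialize, copy, and destruct, so
/// using it as, e.g., a variable with automatic storage duration falls
/// under the NTCUC_AutoVar context above.
/// \code
///   union U { id obj; int i; }; // non-trivial C union under ARC
///   void g(void) { union U u; } // diagnosed: NTCUC_AutoVar
/// \endcode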
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose when the parameters or return value of a function or Obj-C /// method definition are passed by value and their size exceeds a /// specified threshold.
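///
/// For instance (hypothetical threshold, as set by -Wlarge-by-value-copy=N):
/// \code
///   struct Big { char buf[4096]; };
///   Big identity(Big b); // both the parameter and the return value
///                        // exceed a small threshold
/// \endcode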
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note that the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
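///
/// For example (hypothetical module and entity names): a use of 'foo',
/// declared in module 'M' but not yet imported, is diagnosed with a note
/// suggesting the import that would make it visible:
/// \code
///   import M; // suggested fix
/// \endcode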
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
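///
/// A minimal illustration:
/// \code
///   typedef int X;
///   struct X *p; // error: 'X' refers to a typedef, i.e. NTK_Typedef
/// \endcode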
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
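///
/// For example, a defaulted copy assignment is a special member, while a
/// defaulted equality operator (C++20) is a comparison:
/// \code
///   struct T {
///     T &operator=(const T &) = default;          // special member
///     bool operator==(const T &) const = default; // comparison
///   };
/// \endcode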
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Unlike C++, we actually parse the body and reject or error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs.
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). 
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
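///
/// For example (a hedged sketch of the priority rules above): an attribute
/// written directly on the declaration (AP_Explicit) replaces one that was
/// previously inferred for the same platform
/// (AP_InferredFromOtherPlatform).
/// \code
///   void f(void) __attribute__((availability(ios, introduced=11.0)));
/// \endcode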
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name, bool Override); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. 
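///
/// A minimal illustration:
/// \code
///   void f(int);
///   void f(int); // Ovl_Match: a redeclaration, not an overload
/// \endcode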
Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
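///
/// For example (standard lifetime-extension behavior): binding a reference
/// to a temporary extends the temporary's lifetime to that of the
/// reference, whereas returning a reference to a temporary is diagnosed:
/// \code
///   const int &ok = 42;           // temporary lifetime-extended
///   const int &f() { return 42; } // diagnosed: dangling reference
/// \endcode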
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
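///
/// For instance (a hedged sketch), a switch condition must be contextually
/// implicitly converted to an integral or enumeration type:
/// \code
///   struct S { operator int() const; };
///   void g(S s) {
///     switch (s) { default: break; } // converted via S::operator int
///   }
/// \endcode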
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From,
QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all templates and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
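///
/// For example (using Clang's 'enable_if' extension): the address of a
/// function with a non-tautological enable_if condition cannot be taken.
/// \code
///   int f(int n) __attribute__((enable_if(n > 0, "n must be positive")));
///   int (*p)(int) = &f; // diagnosed (when Complain is true)
/// \endcode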
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection = true, bool CalleesAddressIsTaken = false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks
that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. 
This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. 
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true);

void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace);

bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs);

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);

/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
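// Usage sketch (illustrative only): the common pattern for attribute handlers
// that take a string-literal argument. `S` and the ParsedAttr `AL` are
// assumed to be in scope; index 0 names the first attribute argument.
//
//   StringRef Str;
//   SourceLocation LiteralLoc;
//   if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc))
//     return; // not a string literal; a diagnostic was already emitted
//   if (!S.checkSectionName(LiteralLoc, Str))
//     return;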
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting);

/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy { MMS_loose, MMS_strict };

/// MatchTwoMethodDeclarations - Checks if two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false);
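// Usage sketch (illustrative only): strict signature comparison between an
// implementation method and its interface declaration. `S`, `ImplMD`, and
// `IfaceMD` are assumed to be in scope.
//
//   if (!S.MatchTwoMethodDeclarations(ImplMD, IfaceMD, Sema::MMS_strict))
//     ; // the two signatures differ; emit a conflicting-types warning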
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance);

public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found, and
/// the parameter checkTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass);

private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true);
}
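// Usage sketch (illustrative only): resolving a message sent to a receiver of
// type 'id' through the global method pool. `S`, `Sel`, and `R` are assumed
// to be in scope.
//
//   if (ObjCMethodDecl *MD = S.LookupInstanceMethodInGlobalPool(Sel, R))
//     ; // typecheck the message send against MD's signature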
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.

public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() { return E; }
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body);

StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);

StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally);

StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody);

StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);

VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id);

Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers);

StmtResult ActOnSEHTryBlock(bool IsCXXTry, // true for 'try', false for '__try'
                            SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);

/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious null
/// statement as its \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
///   do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID);

/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc);

/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc);
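// Source-level illustration (not part of this interface) of the patterns
// the diagnostics above flag:
//
//   for (int i = 0; i < n; ++i);   // DiagnoseEmptyLoopBody: suspicious ';'
//     body();                      // indented as if it were the loop body
//
//   v = std::move(v);              // DiagnoseSelfMove: value moved to itself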
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  ParsingClassDepth++;
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  ParsingClassDepth--;
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.

bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);

bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc);

void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);
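// Usage sketch (illustrative only): gating the use of a declaration on
// availability and deleted-ness before building a reference to it. `S`, `D`,
// and `Loc` are assumed to be in scope.
//
//   if (S.DiagnoseUseOfDecl(D, Loc))
//     return ExprError(); // the use was diagnosed as invalid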
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex);

ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt);

/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation());

/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false);

/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false);

enum class AtomicArgumentOrder { API, AST };
ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op);

CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc);

ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();

NamespaceDecl *lookupStdExperimentalNamespace();

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase);

public:
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};

/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
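// Source-level illustration (not part of this interface) of constructs that
// exercise the two standard-library lookups above:
//
//   auto Cmp = 1 <=> 2;                      // CheckComparisonCategoryType must
//                                            // find std::strong_ordering in <compare>
//   std::initializer_list<int> IL = {1, 2};  // isStdInitializerList /
//                                            // BuildStdInitializerList territory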
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);

bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);
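// Source-level illustration (not part of this interface) of the inheriting-
// constructor machinery above:
//
//   struct Base { Base(int); };
//   struct Derived : Base {
//     using Base::Base;   // CheckInheritingConstructorUsingDecl validates this;
//   };                    // findInheritingConstructor synthesizes Derived(int)
//   Derived D(42);        // ...when the inherited constructor is actually used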
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E) { CalledStmt(E); }

  /// Integrate an invoked statement into the collected data.
  void CalledStmt(Stmt *S);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};
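// Usage sketch (illustrative only): folding the subobject operations an
// implicit definition would call into one computed exception specification.
// `S`, `Loc`, and `SubobjectCallees` (hypothetical) are assumed to exist.
//
//   Sema::ImplicitExceptionSpecification ExceptSpec(S);
//   for (const CXXMethodDecl *Callee : SubobjectCallees)
//     ExceptSpec.CalledDecl(Loc, Callee);
//   FunctionProtoType::ExceptionSpecInfo ESI = ExceptSpec.getExceptionSpec();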
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. 
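/// A minimal sketch of the usual declare-then-define pairing (the names
/// 'SemaRef', 'ClassDecl', and 'UseLoc' are illustrative, not part of this
/// interface):
/// \code
///   CXXConstructorDecl *Ctor =
///       SemaRef.DeclareImplicitDefaultConstructor(ClassDecl);
///   if (!Ctor->isDeleted())
///     SemaRef.DefineImplicitDefaultConstructor(UseLoc, Ctor);
/// \endcode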
CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. 
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). 
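/// For example (a Microsoft extension; 'IUnknown' here is illustrative):
/// \code
///   __uuidof(IUnknown)  // yields the GUID associated with the interface
/// \endcode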
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return 'true' on failure, 'false' on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. 
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
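/// For example, under the usual lookup rules a plain new-expression is
/// resolved with AFS_Both while a '::new' is resolved with AFS_Global:
/// \code
///   T *p = new T;    // AFS_Both: T's operator new if any, else ::operator new
///   T *q = ::new T;  // AFS_Global: only ::operator new is considered
/// \endcode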
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
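/// For example, a full-expression that materializes a temporary with a
/// non-trivial destructor is wrapped so the destructor runs at the end of
/// the full-expression:
/// \code
///   std::string("tmp").size();  // wrapped in ExprWithCleanups; ~string()
///                               // runs after the call completes
/// \endcode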
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
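/// A sketch of the round trip through a parser annotation token (the
/// 'Actions' object and 'AnnotationRange' are illustrative):
/// \code
///   void *Annotation = Actions.SaveNestedNameSpecifierAnnotation(SS);
///   // ... the annotation token is stored and later consumed ...
///   CXXScopeSpec Restored;
///   Actions.RestoreNestedNameSpecifierAnnotation(Annotation, AnnotationRange,
///                                                Restored);
/// \endcode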
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
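/// For example, given
/// \code
///   auto L = [x = compute()] { return x; };
/// \endcode
/// a dummy variable for 'x' lets the body's reference to 'x' resolve during
/// parsing, while the capture itself is stored as a field of the closure
/// type ('compute' is an illustrative name).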
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); /// Check whether the given type-dependent expression will be the name of a /// function or another callable function-like entity (e.g. a function /// template or overload set) for any substitution. bool IsDependentFunctionNameExpr(Expr *E); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives true if D1 is /// at least as constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// Emit a diagnostic if D1 was not at least as constrained as D2, but would /// have been if a pair of atomic constraints involved had been declared in a /// concept and not repeated in two separate places in code. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. 
/// \param Satisfaction if false is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constraints are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied because it was ill-formed. void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation, StringRef Diagnostic); void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old, SourceLocation New); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
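/// For example:
/// \code
///   @42    // boxed as if by [NSNumber numberWithInt:42]
///   @3.14  // boxed as if by [NSNumber numberWithDouble:3.14]
/// \endcode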
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, 
IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. 
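/// For example, naming 'std::vector<int>::iterator' lets the iterator
/// record be annotated as if it carried [[gsl::Pointer]], which in turn
/// enables lifetime warnings about dangling iterators, as in this sketch:
/// \code
///   auto It = std::vector<int>{1, 2, 3}.begin();  // may warn: iterator
///                                                 // into a dead temporary
/// \endcode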
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, 
ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
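/// For example:
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); };  // error: 'f' overrides a 'final' function
/// \endcode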
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool Disambiguation = false); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
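/// For example, 'X' is such a name in the deduction guide below:
/// \code
///   template <typename T> struct X { X(T); };
///   template <typename T> X(T) -> X<T>;
/// \endcode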
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
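  ///
  /// For illustration, each of these declarations is checked in a different
  /// context (the annotations are examples, not an exhaustive mapping):
  /// \code
  /// template<typename T> struct S;          // TPC_ClassTemplate
  /// template<typename T> void f(T);         // TPC_FunctionTemplate
  /// template<typename T> using A = S<T>;    // TPC_TypeAliasTemplate
  /// \endcode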
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
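  ///
  /// For illustration, in
  /// \code
  /// template<typename T> void f() {
  ///   class T::template apply<int> *p; // elaborated-type-specifier naming a template-id
  /// }
  /// \endcode
  /// the type of \c p is processed through this entry point.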
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, 
                                         SourceLocation ExternLoc,
                                         SourceLocation TemplateLoc,
                                         unsigned TagSpec, SourceLocation KWLoc,
                                         CXXScopeSpec &SS, IdentifierInfo *Name,
                                         SourceLocation NameLoc,
                                         const ParsedAttributesView &Attr);

  DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        Declarator &D);

  TemplateArgumentLoc
  SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                          SourceLocation TemplateLoc,
                                          SourceLocation RAngleLoc,
                                          Decl *Param,
                                          SmallVectorImpl<TemplateArgument>
                                              &Converted,
                                          bool &HasDefaultArg);

  /// Specifies the context in which a particular template
  /// argument is being checked.
  enum CheckTemplateArgumentKind {
    /// The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,

    /// The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,

    /// The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                             NamedDecl *Template, SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
  /// contain the converted forms of the template arguments as written.
  /// Otherwise, \p TemplateArgs will not be modified.
  ///
  /// \param ConstraintsNotSatisfied If provided, and an error occurred, will
  /// receive true if the cause for the error is the associated constraints of
  /// the template not being satisfied by the template arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
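  ///
  /// For illustration, given
  /// \code
  /// template<typename T, typename U = T*> struct X { };
  /// X<int> x; // TemplateArgs is {int}; Converted becomes {int, int*}
  /// \endcode
  /// the defaulted argument for \c U is substituted and appended during
  /// conversion.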
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. 
/// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); 
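  // For illustration, a requires-expression such as
  //
  //   template<typename T>
  //   concept C = requires(T t) {
  //     t + t;                         // a simple requirement
  //     typename T::value_type;        // a type requirement
  //     { t.size() } noexcept;         // a compound requirement
  //     requires sizeof(T) > 1;        // a nested requirement
  //   };
  //
  // is assembled through the ActOn*Requirement callbacks above and completed
  // by ActOnRequiresExpr.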
  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in this
  /// location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// An arbitrary expression.
    UPPC_Expression = 0,

    /// The base type of a class type.
    UPPC_BaseType,

    /// The type of an arbitrary declaration.
    UPPC_DeclarationType,

    /// The type of a data member.
    UPPC_DataMemberType,

    /// The size of a bit-field.
    UPPC_BitFieldWidth,

    /// The expression in a static assertion.
    UPPC_StaticAssertExpression,

    /// The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,

    /// The enumerator value.
    UPPC_EnumeratorValue,

    /// A using declaration.
    UPPC_UsingDeclaration,

    /// A friend declaration.
    UPPC_FriendDeclaration,

    /// A declaration qualifier.
    UPPC_DeclarationQualifier,

    /// An initializer.
    UPPC_Initializer,

    /// A default argument.
    UPPC_DefaultArgument,

    /// The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,

    /// The type of an exception.
    UPPC_ExceptionType,

    /// Partial specialization.
    UPPC_PartialSpecialization,

    /// Microsoft __if_exists.
    UPPC_IfExists,

    /// Microsoft __if_not_exists.
    UPPC_IfNotExists,

    /// Lambda expression.
    UPPC_Lambda,

    /// Block expression.
    UPPC_Block,

    /// A type constraint.
    UPPC_TypeConstraint
  };

  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                        ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given expression contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param E The expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(Expr *E,
                       UnexpandedParameterPackContext UPPC = UPPC_Expression);

  /// If the given nested-name-specifier contains an unexpanded
  /// parameter pack, diagnose the error.
  ///
  /// \param SS The nested-name-specifier that is being checked for
  /// unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param NameInfo The name (with source location information) that
  /// is being checked for unexpanded parameter packs.
/// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. 
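  ///
  /// For example, in
  /// \code
  /// template<typename ...Ts> void f(Ts* ...ps); // pattern 'Ts*', expansion 'Ts*...'
  /// \endcode
  /// the parameter type \c Ts*... is built from the pattern \c Ts*.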
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. 
/// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. 
    TDK_DeducedMismatch,

    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,

    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,

    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,

    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,

    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,

    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,

    /// The deduced arguments did not satisfy the constraints associated
    /// with the template.
    TDK_ConstraintsNotSatisfied,

    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,

    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// A function argument from which we performed template argument
  /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
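  ///
  /// For illustration, class template argument deduction in
  /// \code
  /// template<typename T> struct X { X(T); };
  /// X x(42); // uses an implicit guide, roughly 'template<typename T> X(T) -> X<T>;'
  /// \endcode
  /// relies on these implicitly declared guides.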
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
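      ///
      /// For example:
      /// \code
      /// template<typename T, typename U = T*> struct X { };
      /// X<int> x; // instantiates the default argument 'T*' for U
      /// \endcode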
      DefaultTemplateArgumentInstantiation,

      /// We are instantiating a default argument for a function.
      /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
      /// provides the template arguments as specified.
      DefaultFunctionArgumentInstantiation,

      /// We are substituting explicit template arguments provided for
      /// a function template. The entity is a FunctionTemplateDecl.
      ExplicitTemplateArgumentSubstitution,

      /// We are substituting template arguments determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
      /// a TemplateDecl.
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are computing the exception specification for a defaulted special
      /// member function.
      ExceptionSpecEvaluation,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation,

      /// We are instantiating a requirement of a requires expression.
      RequirementInstantiation,

      /// We are checking the satisfaction of a nested requirement of a requires
      /// expression.
      NestedRequirementConstraintsCheck,

      /// We are declaring an implicit special member function.
      DeclaringSpecialMember,

      /// We are declaring an implicit 'operator==' for a defaulted
      /// 'operator<=>'.
      DeclaringImplicitEqualityComparison,

      /// We are defining a synthesized function (such as a defaulted special
      /// member).
      DefiningSynthesizedFunction,

      // We are checking the constraints associated with a constrained entity or
      // the constraint expression of a concept. This includes the checks that
      // atomic constraints have the type 'bool' and that they can be constant
      // evaluated.
      ConstraintsCheck,

      // We are substituting template arguments into a constraint expression.
      ConstraintSubstitution,

      // We are normalizing a constraint expression.
      ConstraintNormalization,

      // We are substituting into the parameter mapping of an atomic constraint
      // during normalization.
      ParameterMappingSubstitution,

      /// We are rewriting a comparison operator in terms of an operator<=>.
      RewritingOperatorAsSpaceship,

      /// Added for Template instantiation observation.
      /// Memoization means we are _not_ instantiating a template because
      /// it is already instantiated (but we entered a context where we
      /// would have had to if it was not already instantiated).
      Memoization
    } Kind;

    /// Was the enclosing context a non-instantiation SFINAE context?
    bool SavedInNonInstantiationSFINAEContext;

    /// The point of instantiation or synthesis within the source code.
    SourceLocation PointOfInstantiation;

    /// The entity that is being synthesized.
    Decl *Entity;

    /// The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    // FIXME: Wrap this union around more members, or perhaps store the
    // kind-specific members in the RAII object owning the context.
    union {
      /// The number of template arguments in TemplateArgs.
      unsigned NumTemplateArgs;

      /// The special member being declared or defined.
      CXXSpecialMember SpecialMember;
    };

    ArrayRef<TemplateArgument> template_arguments() const {
      assert(Kind != DeclaringSpecialMember);
      return {TemplateArgs, NumTemplateArgs};
    }

    /// The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// The source range that covers the construct that causes
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    CodeSynthesisContext()
        : Kind(TemplateInstantiation),
          SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
          Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
          DeductionInfo(nullptr) {}

    /// Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;
  };

  /// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
  class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
        : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  friend class ArgumentPackSubstitutionRAII;

  /// For each declaration that involved template argument deduction, the
  /// set of diagnostics that were suppressed during that template argument
  /// deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and evaluates true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// Note that we are instantiating a class template,
    /// function template, variable template, alias template,
    /// or a member thereof.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          Decl *Entity,
                          SourceRange InstantiationRange = SourceRange());

    struct ExceptionSpecification {};
    /// Note that we are instantiating an exception specification
    /// of a function template.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionDecl *Entity, ExceptionSpecification,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument in a
    /// template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateParameter Param, TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting either explicitly-specified or
    /// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ConstraintNormalization, NamedDecl *Template,
                          SourceRange InstantiationRange);

    struct ParameterMappingSubstitution {};
    /// \brief Note that we are substituting into the parameter mapping of an
    /// atomic constraint during constraint normalization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ParameterMappingSubstitution, NamedDecl *Template,
                          SourceRange InstantiationRange);

    /// \brief Note that we are substituting template arguments into a part of
    /// a requirement of a requires expression.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          concepts::Requirement *Req,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// \brief Note that we are checking the satisfaction of the constraint
    /// expression inside of a nested requirement.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          concepts::NestedRequirement *Req, ConstraintsCheck,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we have finished instantiating this template.
    void Clear();

    ~InstantiatingTemplate() { Clear(); }

    /// Determines whether we have exceeded the maximum
    /// recursive template instantiations.
    bool isInvalid() const { return Invalid; }

    /// Determine whether we are already instantiating this
    /// specialization in some surrounding active instantiation.
    bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

  private:
    Sema &SemaRef;
    bool Invalid;
    bool AlreadyInstantiating;
    bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                                 SourceRange InstantiationRange);

    InstantiatingTemplate(
        Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
        SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
        Decl *Entity, NamedDecl *Template = nullptr,
        ArrayRef<TemplateArgument> TemplateArgs = None,
        sema::TemplateDeductionInfo *DeductionInfo = nullptr);

    InstantiatingTemplate(const InstantiatingTemplate&) = delete;
    InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
  };

  void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
  void popCodeSynthesisContext();

  /// Determine whether we are currently performing template instantiation.
  bool inTemplateInstantiation() const {
    return CodeSynthesisContexts.size() > NonInstantiationEntries;
  }

  void PrintContextStack() {
    if (!CodeSynthesisContexts.empty() &&
        CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
      PrintInstantiationStack();
      LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
    }
    if (PragmaAttributeCurrentTargetDecl)
      PrintPragmaAttributeInstantiationPoint();
  }
  void PrintInstantiationStack();

  void PrintPragmaAttributeInstantiationPoint();

  /// Determines whether we are currently in a context where
  /// template argument substitution failures are not considered
  /// errors.
  ///
  /// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
  /// template-deduction context object, which can be used to capture
  /// diagnostics that will be suppressed.
  Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

  /// Determines whether we are currently in a context that
  /// is not evaluated as per C++ [expr] p5.
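  ///
  /// For example, the operand of \c sizeof is such a context:
  /// \code
  /// int f();              // may be left undefined
  /// int n = sizeof(f());  // the call to f is unevaluated
  /// \endcode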
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// however, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation.
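///
/// For example (illustrative): given `template<typename T> void f(T) {}`,
/// a later call `f(42)` queues an entry whose declaration is the implicitly
/// instantiated `f<int>` and whose location is that of the call, even though
/// the definition of `f<int>` may only be emitted at the end of the
/// translation unit.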
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
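///
/// Sketch of the intended build-then-query pattern (illustrative; `Info0`,
/// `EPI`, and `NumParams` are placeholders, not part of this interface):
/// \code
///   ExtParameterInfoBuilder Builder;
///   Builder.set(0, Info0); // record a non-default info for parameter 0
///   // Trailing parameters are filled with default-constructed infos, and
///   // nullptr is returned if nothing non-default was ever recorded:
///   EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// \endcode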
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
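///
/// Illustrative C++20 source pattern behind this rewrite:
/// \code
///   struct X {
///     auto operator<=>(const X &) const = default;
///     // A defaulted operator== is implicitly declared from the defaulted
///     // operator<=> above, so `X{} == X{}` is well-formed.
///   };
/// \endcode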
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, 
const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const 
ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
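///
/// Illustrative Objective-C forms (the identifiers are placeholders):
/// \code
///   [super init];       // the 'super' receiver
///   [myObject count];   // identifier names a value: instance message
///   [MyClass alloc];    // identifier names a type: class message
/// \endcode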
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
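///
/// Handles directives of the form (illustrative):
/// \code
///   #pragma GCC visibility push(hidden)
///   // ... declarations that receive hidden visibility ...
///   #pragma GCC visibility pop
/// \endcode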
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// Called to set rounding mode for floating point operations. void setRoundingMode(LangOptions::FPRoundingModeKind); /// Called to set exception behavior for floating point operations. void setExceptionMode(LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off".
If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = std::string(Ext); } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive. /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. 
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. 
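///
/// For example (illustrative):
/// \code
///   #pragma omp requires unified_shared_memory
/// \endcode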
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. 
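///
/// For example (illustrative; `N`, `out`, and `work` are placeholders):
/// \code
///   #pragma omp parallel for schedule(static)
///   for (int i = 0; i < N; ++i)
///     out[i] = work(i);
/// \endcode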
StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \returns None if the function and the variant function are not compatible /// with the pragma; otherwise, the pair of the original function and the /// variant reference expression. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p FD. /// \param TI The context traits associated with the function variant. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
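// Illustrative sketch of how the two declare-variant hooks above chain // together (hypothetical caller; 'Actions', 'Ptr', 'VariantRef', 'TI', and // 'SR' are placeholders, not part of this header): // if (Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData = // Actions.checkOpenMPDeclareVariantFunction(Ptr, VariantRef, TI, SR)) // Actions.ActOnOpenMPDeclareVariantDirective(DeclVarData->first, // DeclVarData->second, TI, SR); /// Called on well-formed 'allocator' clause.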
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. 
OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc);
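// Illustrative sketch: the argument-free clauses above can be built either // through their dedicated hooks or through the generic dispatcher // (hypothetical caller; 'Actions' and the source locations are placeholders): // OMPClause *NW = Actions.ActOnOpenMPNowaitClause(StartLoc, EndLoc); // OMPClause *C = Actions.ActOnOpenMPClause(OMPC_nowait, StartLoc, EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause.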
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. 
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. 
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If \p VK is VK_LValue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E);
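// Illustrative sketch of a common call-side pattern for the conversion // helpers above (hypothetical; 'E' is a placeholder expression): // ExprResult Res = DefaultFunctionArrayLvalueConversion(E); // if (Res.isInvalid()) // return ExprError(); // Res = ImpCastExprToType(Res.get(), Context.IntTy, CK_IntegralCast); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there.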
ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for the various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointer types that are not compatible, but we accept them as an /// extension.
IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ, e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType);
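// Illustrative sketch of how AssignConvertType is typically consumed // (hypothetical call site; 'RHS' and the types are placeholders): // AssignConvertType ConvTy = CheckAssignmentConstraints(Loc, LHSType, RHSType); // if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType, RHS.get(), // AA_Assigning)) // return QualType(); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true.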
AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector.
// returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return the converted condition expression, which is invalid if there /// were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns the expression converted to bool, or /// an invalid result if the conversion failed. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID);
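// Illustrative sketch of the parser-side use of ConditionResult // (hypothetical; 'Actions', 'IfLoc', and 'CondExpr' are placeholders): // Sema::ConditionResult Cond = Actions.ActOnCondition( // getCurScope(), IfLoc, CondExpr, Sema::ConditionKind::Boolean); // if (Cond.isInvalid()) // return StmtError(); // if (llvm::Optional<bool> Known = Cond.getKnownValue()) // ... // e.g. fold away the dead branch of an 'if constexpr' /// Checks that the Objective-C declaration is declared in the global scope.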
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns the verified expression, /// which is invalid if there were any errors. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns the verified bit-width expression, which is invalid on error. /// Can optionally return whether the bit-field is of width 0. ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed.
/// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. 
/// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context.
/// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with the highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error, emits an appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure a kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. 
enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
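/* Editorial usage sketch (not part of the original header): a parser-side caller would typically feed a partial call's arguments to ProduceCallSignatureHelp and reuse the returned preferred type when completing the current argument, e.g. QualType Preferred = Actions.ProduceCallSignatureHelp(S, Fn, Args, LParenLoc); if (!Preferred.isNull()) Actions.CodeCompleteExpression(S, Preferred); -- here `Actions`, `S`, `Fn`, `Args`, and `LParenLoc` are assumed parser-side names, shown only for illustration. */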
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call to a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. 
/// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. 
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and is converted to some pointer type T with lower /// or equal alignment requirements. If so, it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. 
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
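/* Editorial usage sketch (not part of the original header): the CUDA call-checking entry points above typically combine as: if (S.getLangOpts().CUDA && !S.CheckCUDACall(Loc, Callee)) return ExprError(); -- `S`, `Loc`, and `Callee` are assumed to exist at the call site. CheckCUDACall returns true for CFP_WrongSide calls but records a deferred diagnostic, so such a call is accepted now and rejected only if the caller is ever codegen'ed. */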
THTensorConv.c
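/* Editorial sketch (not part of the original file): every routine below derives its per-axis output extent from THTensor_(convsize). For an input axis of length x, kernel extent k and stride s: valid ('V') gives o = (x - k) / s + 1 (e.g. x=7, k=3, s=2 -> o=3), while full ('F') gives o = (x - 1) * s + k (e.g. x=7, k=3, s=2 -> o=15). The 2D and 3D kernels apply these formulas independently along each axis. */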
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/THTensorConv.c" #else /* 2D Input, 2D kernel : convolve given image with the given kernel. */ TH_API void THTensor_(validXCorr2Dptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc) { long or = (ir - kr) / sr + 1; long oc = (ic - kc) / sc + 1; long xx, yy, kx, ky; if ((sc != 1) || (oc < 4)) { // regular convolution for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in two dimensions... (between input image and the mask) */ real *pi_ = t_ + yy*sr*ic + xx*sc; real *pw_ = k_; real sum = 0; for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[kx]; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } /* Update output */ *r_++ += alpha*sum; } } } else { // SSE-based convolution for(yy = 0; yy < or; yy++) { real *pi_ = t_ + yy*sr*ic; real *pw_ = k_; for (ky = 0; ky < kr; ky++) { real *pis_ = pi_; for (kx = 0; kx < kc; kx++) { THVector_(add)(r_, pis_, alpha*pw_[kx], oc); pis_++; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } r_ += oc; } } } /* 2D Input, 2D kernel : convolve given image with the given kernel. */ TH_API void THTensor_(validConv2Dptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc) { long or = (ir - kr) / sr + 1; long oc = (ic - kc) / sc + 1; long xx, yy, kx, ky; if ((sc != 1) || (oc < 4)) { // regular convolution for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in two dimensions... (between input image and the mask) */ real *pi_ = t_ + yy*sr*ic + xx*sc; real *pw_ = k_ + kr*kc - 1; real sum = 0; for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[-kx]; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } /* Update output */ *r_++ += alpha*sum; } } } else { // SSE-based convolution for(yy = 0; yy < or; yy++) { real *pw_ = k_ + kr*kc - 1; real *pi_ = t_ + yy*sr*ic; for (ky = 0; ky < kr; ky++) { real *pis_ = pi_; for (kx = 0; kx < kc; kx++) { THVector_(add)(r_, pis_, alpha*pw_[-kx], oc); pis_++; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } r_ += oc; } } } /* 2D Input, 2D kernel : convolve given image with the given kernel, full convolution. */ TH_API void THTensor_(fullConv2Dptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc) { long oc = (ic - 1) * sc + kc; long xx, yy, kx, ky; if ((sc != 1) || (ic < 4)) { // regular convolution for(yy = 0; yy < ir; yy++) { for(xx = 0; xx < ic; xx++) { /* Outer product in two dimensions... (between input image and the mask) */ real *po_ = r_ + yy*sr*oc + xx*sc; real *pw_ = k_; for(ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for(kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[kx]; } po_ += oc; /* next output line */ pw_ += kc; /* next mask line */ } t_++; } } } else { // SSE-based convolution for(yy = 0; yy < ir; yy++) { real *po_ = r_ + yy*sr*oc; real *pw_ = k_; for (ky = 0; ky < kr; ky++) { real *pos_ = po_; for (kx = 0; kx < kc; kx++) { THVector_(add)(pos_, t_, alpha*pw_[kx], ic); pos_++; } po_ += oc; /* next output line */ pw_ += kc; /* next mask line */ } t_ += ic; } } } /* 2D Input, 2D kernel : convolve given image with the given kernel, full convolution. 
*/ TH_API void THTensor_(fullXCorr2Dptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc) { long oc = (ic - 1) * sc + kc; long xx, yy, kx, ky; if ((sc != 1) || (ic < 4)) { // regular convolution for(yy = 0; yy < ir; yy++) { for(xx = 0; xx < ic; xx++) { /* Outer product in two dimensions... (between input image and the mask) */ real *po_ = r_ + yy*sr*oc + xx*sc; real *pw_ = k_ + kr*kc - 1; for(ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for(kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[-kx]; } po_ += oc; /* next output line */ pw_ -= kc; /* next mask line */ } t_++; } } } else { // SSE-based convolution for(yy = 0; yy < ir; yy++) { real *po_ = r_ + yy*sr*oc; real *pw_ = k_ + kr*kc - 1; for (ky = 0; ky < kr; ky++) { real *pos_ = po_; for (kx = 0; kx < kc; kx++) { THVector_(add)(pos_, t_, pw_[-kx]*alpha, ic); pos_++; } po_ += oc; /* next output line */ pw_ -= kc; /* next mask line */ } t_ += ic; } } } /* 2D Input, 2D kernel : convolve given image with the given kernel, valid convolution. for sr,sc=1 this is equivalent to validXCorr2Dptr, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ TH_API void THTensor_(validXCorr2DRevptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc) { long or = ir - (kr - 1) * sr; long oc = ic - (kc - 1) * sc; long xx, yy, kx, ky; if ((sc != 1) || (kc < 4)) { // regular convolution for(yy = 0; yy < kr; yy++) { for(xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + yy*sr*ic + xx*sc; real z = *k_++ * alpha; for(ky = 0; ky < or; ky++) { for(kx = 0; kx < oc; kx++) po_[kx] += z * pi_[kx]; pi_ += ic; po_ += oc; } } } } else { // SSE-based convolution for(yy = 0; yy < kr; yy++) { for(xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + yy*sr*ic + xx*sc; real z = *k_++ * alpha; for(ky = 0; ky < or; ky++) { THVector_(add)(po_, pi_, z, oc); pi_ += ic; po_ += oc; } } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel. */ TH_API void THTensor_(validXCorr3Dptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc) { long ot = (it - kt) / st + 1; long or = (ir - kr) / sr + 1; long oc = (ic - kc) / sc + 1; long zz, xx, yy; for (zz = 0; zz < ot; zz++) { for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in three dimensions... (between input volume and the mask) */ real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; real *pw_ = k_; real sum = 0; long kz, kx, ky; for(kz = 0; kz < kt; kz++) { for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[kx]; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } } /* Update output */ *r_++ += sum*alpha; } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel. */ TH_API void THTensor_(validConv3Dptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc) { long ot = (it - kt) / st + 1; long or = (ir - kr) / sr + 1; long oc = (ic - kc) / sc + 1; long zz, xx, yy; for(zz = 0; zz < ot; zz++) { for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in three dimensions... 
(between input volume and the mask) */ real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; real *pw_ = k_ + kt*kr*kc - 1; real sum = 0; long kz, kx, ky; for(kz = 0; kz < kt; kz++) { for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[-kx]; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } } /* Update output */ *r_++ += alpha*sum; } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel, full convolution. */ TH_API void THTensor_(fullConv3Dptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc) { long or = (ir - 1) * sr + kr; long oc = (ic - 1) * sc + kc; long zz, xx, yy; for(zz = 0; zz < it; zz++) { for(yy = 0; yy < ir; yy++) { for(xx = 0; xx < ic; xx++) { /* Outer product in three dimensions... (between input volume and the mask) */ real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc; real *pw_ = k_; long kz, kx, ky; for(kz = 0; kz < kt; kz++) { for(ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for(kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[kx]; } po_ += oc; /* next output line */ pw_ += kc; /* next mask line */ } } t_++; } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel, full convolution. */ TH_API void THTensor_(fullXCorr3Dptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc) { long or = (ir - 1) * sr + kr; long oc = (ic - 1) * sc + kc; long zz, xx, yy; for(zz = 0; zz < it; zz++) { for(yy = 0; yy < ir; yy++) { for(xx = 0; xx < ic; xx++) { /* Outer product in three dimensions... (between input volume and the mask) */ real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc; real *pw_ = k_ + kt*kr*kc - 1; long kz, kx, ky; for(kz = 0; kz < kt; kz++) { for(ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for(kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[-kx]; } po_ += oc; /* next output line */ pw_ -= kc; /* next mask line */ } } t_++; } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel, valid convolution. 
for sr,sc=1 this is equivalent to validXCorr3Dptr, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ TH_API void THTensor_(validXCorr3DRevptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc) { long ot = it - (kt - 1) * st; long or = ir - (kr - 1) * sr; long oc = ic - (kc - 1) * sc; long zz, xx, yy; for(zz = 0; zz < kt; zz++) { for(yy = 0; yy < kr; yy++) { for(xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; real z = *k_++ * alpha; long kz, kx, ky; for(kz = 0; kz < ot; kz++) { for(ky = 0; ky < or; ky++) { for(kx = 0; kx < oc; kx++) po_[kx] += z * pi_[kx]; pi_ += ic; po_ += oc; } } } } } } void THTensor_(conv2d)(real* output_data, real alpha, real* ptr_input, long nInputRows, long nInputCols, real* ptr_weight, long nKernelRows, long nKernelCols, long srow, long scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } void THTensor_(conv3d)(real* output_data, real alpha, real* ptr_input, long nInputDepth, long nInputRows, long nInputCols, real* ptr_weight, long nKernelDepth, long nKernelRows, long nKernelCols, long sdepth, long srow, long scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(fullConv3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else if (*xc == 'X') THTensor_(validXCorr3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(validConv3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); } long THTensor_(convsize)(long x, long k, long s, const char* vf) { THArgCheck(*vf == 'V' || *vf == 'F', 1, "type of convolution can be 'V' or 'F'"); if (*vf == 'V') return (x-k)/s + 1; else return (x-1)*s + k; } /* 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol) { long nInputPlane, nInputRows, nInputCols; long nKernelPlane, nKernelRows, nKernelCols; long 
nOutputPlane, nOutputRows, nOutputCols; long istride0, kstride0; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THTensor *input = THTensor_(newContiguous)(t_); THTensor *kernel = THTensor_(newContiguous)(k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevger : Input image is smaller than kernel"); nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; long nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ long k; #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ long k; #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } long k; #pragma omp parallel for private(k) for(k = 0; k < nKernelPlane; k++) { long i; /* get kernel */ real *ptr_weight = weight_data+k*kstride0; for(i = 0; i < nInputPlane; i++) { /* get output */ real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; /* get input */ real *ptr_input = input_data+i*istride0; /* do image, kernel convolution */ THTensor_(validXCorr2DRevptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 4D output like rank1 update A <- xx' + beta*A for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol) { long nbatch, nInputPlane, nInputRows, nInputCols; long nKernelPlane, nKernelRows, nKernelCols; long nOutputRows, nOutputCols; long istride0, kstride0, istride1, kstride1; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THTensor *input = THTensor_(newContiguous)(t_); THTensor *kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; istride1 = input->stride[1]; nbatch = input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; 
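/* Editorial note (not in the original source): in this batched variant, dimension 0 of both input and kernel is the batch axis, so stride[0] steps between batch samples and stride[1] between feature planes; the newContiguous copies above guarantee these strides are dense. */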
nKernelPlane = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevgerm : Input image is smaller than kernel"); THArgCheck(kernel->size[0] == input->size[0] , 2, "conv2DRevgerm : Input batch and kernel batch are not the same size"); nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; long nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ long k; #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ long k; #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } long k; #pragma omp parallel for private(k) for(k = 0; k < nKernelPlane; k++) { long i; for(i = 0; i < nInputPlane; i++) { long p; for(p = 0; p < nbatch; p++) { /* get kernel */ real *ptr_weight = weight_data + p*kstride0 + k*kstride1; /* get output */ real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; /* get input */ real *ptr_input = input_data + p*istride0 + i*istride1; /* do image, kernel convolution */ THTensor_(validXCorr2DRevptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A */ void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputRows, nInputCols; long nKernelPlane, nKernelRows, nKernelCols; long nOutputPlane, nOutputRows, nOutputCols; long istride0, kstride0; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); THTensor *input = THTensor_(newContiguous)(t_); THTensor *kernel = THTensor_(newContiguous)(k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dger : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { // valid nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols 
- nKernelCols) / scol + 1; } long nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ long k; #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ long k; #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } long k; #pragma omp parallel for private(k) for(k = 0; k < nKernelPlane; k++) { long i; /* get kernel */ real *ptr_weight = weight_data+k*kstride0; for(i = 0; i < nInputPlane; i++) { /* get output */ real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; /* get input */ real *ptr_input = input_data+i*istride0; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 4D kernel, 3D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputRows, nInputCols; long nKernelRows, nKernelCols; long nOutputPlane, nOutputRows, nOutputCols; long istride0, kstride0, kstride1; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel; if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { kernel = THTensor_(newContiguous)(k_); } else { THTensor_(retain)(k_); kernel = k_; } nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + 
nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { // valid nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } long nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ long k; #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ long k; #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } long k; #pragma omp parallel for private(k) for(k = 0; k < nOutputPlane; k++) { long i; /* get output */ real *ptr_output = output_data + k*nOutputCols*nOutputRows; for(i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0 + i*kstride1; /* get input */ real *ptr_input = input_data + i*istride0; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } /* Next output plane */ /* output_data += nOutputCols*nOutputRows;*/ } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 4D output batched matrix vector product like y <- Ax + beta*y */ void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputRows, nInputCols; long nKernelRows, nKernelCols; long nOutputPlane, nOutputRows, nOutputCols; long kstride0, kstride1; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel; if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { kernel = THTensor_(newContiguous)(k_); } else { THTensor_(retain)(k_); kernel = k_; } long nbatch = input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmm : Input 
image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { // valid nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } long nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ long p; #pragma omp parallel for private(p) for (p=0; p < r_->size[0]; p++) { long k; for (k = 0; k < r_->size[1]; k++) { real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ long p; #pragma omp parallel for private(p) for (p=0; p < r_->size[0]; p++) { long k; for (k = 0; k < r_->size[1]; k++) { real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows; long l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } } long p; #pragma omp parallel for private(p) for (p=0; p < nbatch; p++) { long k; for(k = 0; k < nOutputPlane; k++) { long i; /* get output */ real *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows; for(i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0 + i*kstride1; /* get input */ real *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } /* Next output plane */ /* output_data += nOutputCols*nOutputRows;*/ } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 2D input, 2D kernel, 2D output scalar multiplication like y <- x*y + beta*y */ void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) { THArgCheck(t_->nDimension == 2 , 3, "input: 2D Tensor expected"); THArgCheck(k_->nDimension == 2 , 4, "kernel: 2D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel = THTensor_(newContiguous)(k_); long nInputRows = input->size[0]; long nInputCols = input->size[1]; long nKernelRows = kernel->size[0]; long nKernelCols = kernel->size[1]; long nOutputRows, nOutputCols; THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmul : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); long nelem = 
THTensor_(nElement)(r_); THTensor_(resize2d)(r_, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) THTensor_(zero)(r_); else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *ptr_input = THTensor_(data)(input); real *ptr_weight = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); /* do image, kernel convolution */ THTensor_(conv2d)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 3D output component wise multiplication like y <- y.*x + beta*y */ void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputRows, nInputCols; long nKernelRows, nKernelCols; long nOutputPlane, nOutputRows, nOutputCols; long istride0, kstride0; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dcmul : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); long nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); long k; for(k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + k*istride0; /* do image, kernel convolution */ THTensor_(conv2d)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputCols*nOutputRows; } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 3D output component wise multiplication like with a permutation map y <- y.*x + beta*y */ void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputRows, nInputCols; long nKernelRows, nKernelCols; long nOutputPlane, nOutputRows, nOutputCols; long istride0, kstride0; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel = 
THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmap : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); long nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); long nmaps = map->size[0]; long k; for(k = 0; k < nmaps; k++) { /* get indices */ long from = (long)THTensor_(get2d)(map,k,0)-1; long to = (long)THTensor_(get2d)(map,k,1)-1; /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + from*istride0; /* get output */ real *ptr_output = output_data + to*nOutputRows*nOutputCols; /* do image, kernel convolution */ THTensor_(conv2d)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 5D output like rank1 update A <- xx' + beta*A for sr,sc=1 this is equivalent to xcorr2Dger, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol) { long nInputPlane, nInputDepth, nInputRows, nInputCols; long nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; long istride0, kstride0; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THTensor *input = THTensor_(newContiguous)(t_); THTensor *kernel = THTensor_(newContiguous)(k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelDepth= kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck(nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv3DRevger : Input image is smaller than kernel"); nOutputDepth = nInputDepth - (nKernelDepth - 1) * sdepth; nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; long nelem = THTensor_(nElement)(r_); THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *input_data = 
THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); long k,i; for(k = 0; k < nKernelPlane; k++) { /* get kernel */ real *ptr_weight = weight_data+k*kstride0; for(i = 0; i < nInputPlane; i++) { /* get input */ real *ptr_input = input_data+i*istride0; /* do image, kernel convolution */ THTensor_(validXCorr3DRevptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 5D output like rank1 update A <- xx' + beta*A */ void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputDepth, nInputRows, nInputCols; long nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; long istride0, kstride0; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); THTensor *input = THTensor_(newContiguous)(t_); THTensor *kernel = THTensor_(newContiguous)(k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dger : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); long nelem = THTensor_(nElement)(r_); THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); long k,i; for(k = 0; k < nKernelPlane; k++) { /* get kernel */ real *ptr_weight = weight_data+k*kstride0; for(i = 0; i < nInputPlane; i++) { /* get input */ real *ptr_input = input_data+i*istride0; /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 5D kernel, 4D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const 
char *xc) { long nInputPlane, nInputDepth, nInputRows, nInputCols; long nKernelDepth, nKernelRows, nKernelCols; long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; long istride0, kstride0, kstride1; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 5 , 4, "kernel: 5D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel; if (!(k_->stride[4] == 1) || !(k_->stride[3] == k_->size[4])) { kernel = THTensor_(newContiguous)(k_); } else { THTensor_(retain)(k_); kernel = k_; } nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelDepth = kernel->size[2]; nKernelRows = kernel->size[3]; nKernelCols = kernel->size[4]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmv : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); long nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); long k,i; for(k = 0; k < nOutputPlane; k++) { for(i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0 + i*kstride1; /* get input */ real *ptr_input = input_data + i*istride0; /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 3D output scalar multiplication like y <- x*y + beta*y */ void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc) { THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel = THTensor_(newContiguous)(k_); long nInputDepth = input->size[0]; long nInputRows = input->size[1]; long nInputCols 
= input->size[2]; long nKernelDepth = kernel->size[0]; long nKernelRows = kernel->size[1]; long nKernelCols = kernel->size[2]; long nOutputDepth, nOutputRows, nOutputCols; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); long nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) THTensor_(zero)(r_); else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *ptr_input = THTensor_(data)(input); real *ptr_weight = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 4D output component wise multiplication like y <- y.*x + beta*y */ void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputDepth, nInputRows, nInputCols; long nKernelDepth, nKernelRows, nKernelCols; long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; long istride0, kstride0; THArgCheck(t_->nDimension == 4 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dcmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); long nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); long k; for(k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + k*istride0; /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, 
nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 4D output component wise multiplication like with a permutation map y <- y.*x + beta*y */ void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, long sdepth, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputDepth, nInputRows, nInputCols; long nKernelDepth, nKernelRows, nKernelCols; long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; long istride0, kstride0; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); THTensor *input = THTensor_(newContiguous)(t_); THTensor* kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmap : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); long nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); real *input_data = THTensor_(data)(input); real *weight_data = THTensor_(data)(kernel); real *output_data = THTensor_(data)(r_); long nmaps = map->size[0]; long k; for(k = 0; k < nmaps; k++) { /* get indices */ long from = (long)THTensor_(get2d)(map,k,0)-1; long to = (long)THTensor_(get2d)(map,k,1)-1; /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + from*istride0; /* get output */ real *ptr_output = output_data + to*nOutputDepth*nOutputRows*nOutputCols; /* do image, kernel convolution */ THTensor_(conv3d)(ptr_output, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } THTensor_(free)(input); THTensor_(free)(kernel); } #endif
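/* For reference: every conv*D routine above sizes its output with the same
   'V' (valid) / 'F' (full) arithmetic that THTensor_(convsize) wraps. A
   minimal standalone sketch of that arithmetic (illustrative helper, not
   part of TH): */
#include <assert.h>

static long conv_out_size(long n_in, long n_kernel, long stride, char vf)
{
    if (vf == 'F')   /* "full": the kernel may overhang both borders */
        return (n_in - 1) * stride + n_kernel;
    else             /* "valid": the kernel stays inside the input */
        return (n_in - n_kernel) / stride + 1;
}

/* Example: a 7-wide input with a 3-wide kernel at stride 1 yields a 5-wide
   "valid" output and a 9-wide "full" output. */
static void conv_out_size_check(void)
{
    assert(conv_out_size(7, 3, 1, 'V') == 5);
    assert(conv_out_size(7, 3, 1, 'F') == 9);
}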
clang-272521.c
#include <stdio.h>
#include <complex.h>

/* Reduce a float complex accumulator across an OpenMP target region:
   ten iterations of += (1 + 1i) should leave C = 10 + 10i. */
int main() {
  float complex C = 0 + 0 * I;
#pragma omp target parallel for reduction(+:C) map(tofrom: C)
  for (int i = 0; i < 10; i++) {
    C += 1.0 + 1.0 * I;
  }
  printf("C = %f+%fi\n", creal(C), cimag(C));
  return 0;
}
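/* For comparison: the same complex reduction on the host only — a minimal
   sketch that drops the target construct, so the result can be checked
   without an offload device (host_variant is an illustrative name): */
#include <stdio.h>
#include <complex.h>

int host_variant(void)
{
    float complex C = 0.0f + 0.0f * I;
#pragma omp parallel for reduction(+:C)
    for (int i = 0; i < 10; i++)
        C += 1.0f + 1.0f * I;
    printf("C = %f+%fi\n", crealf(C), cimagf(C));  /* C = 10.000000+10.000000i */
    return 0;
}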
McULTRABuilder.h
#pragma once #include <algorithm> #include "../../../DataStructures/TripBased/Data.h" #include "../../../DataStructures/RAPTOR/Data.h" #include "../../../Helpers/MultiThreading.h" #include "../../../Helpers/Timer.h" #include "../../../Helpers/Console/Progress.h" #include "McShortcutSearch.h" namespace TripBased { template<bool DEBUG = false, bool USE_TIEBREAKING_KEY = true> class McULTRABuilder { public: inline static constexpr bool Debug = DEBUG; inline static constexpr bool UseTiebreakingKey = USE_TIEBREAKING_KEY; using Type = McULTRABuilder<Debug, UseTiebreakingKey>; public: McULTRABuilder(const Data& data) : data(data) { stopEventGraph.addVertices(data.numberOfStopEvents()); } void computeShortcuts(const ThreadPinning& threadPinning, const int intermediateWitnessTransferLimit = 0, const int finalWitnessTransferLimit = 0, const int minDepartureTime = -never, const int maxDepartureTime = never, const bool verbose = true) noexcept { if (verbose) std::cout << "Computing shortcuts with " << threadPinning.numberOfThreads << " threads." << std::endl; std::vector<Shortcut> shortcuts; Progress progress(data.numberOfStops(), verbose); omp_set_num_threads(threadPinning.numberOfThreads); #pragma omp parallel { threadPinning.pinThread(); McShortcutSearch<Debug, UseTiebreakingKey> shortcutSearch(data, intermediateWitnessTransferLimit, finalWitnessTransferLimit); #pragma omp for schedule(dynamic) for (size_t i = 0; i < data.numberOfStops(); i++) { shortcutSearch.run(StopId(i), minDepartureTime, maxDepartureTime); progress++; } #pragma omp critical { const std::vector<Shortcut>& localShortcuts = shortcutSearch.getShortcuts(); for (const Shortcut& shortcut : localShortcuts) { shortcuts.emplace_back(shortcut); } } } std::sort(shortcuts.begin(), shortcuts.end(), [](const Shortcut& a, const Shortcut& b){ return (a.origin < b.origin) || ((a.origin == b.origin) && (a.destination < b.destination)); }); stopEventGraph.addEdge(Vertex(shortcuts[0].origin), Vertex(shortcuts[0].destination)).set(TravelTime, shortcuts[0].walkingDistance); for (size_t i = 1; i < shortcuts.size(); i++) { if ((shortcuts[i].origin == shortcuts[i - 1].origin) && (shortcuts[i].destination == shortcuts[i - 1].destination)) continue; stopEventGraph.addEdge(Vertex(shortcuts[i].origin), Vertex(shortcuts[i].destination)).set(TravelTime, shortcuts[i].walkingDistance); } stopEventGraph.sortEdges(ToVertex); progress.finished(); } inline const DynamicTransferGraph& getStopEventGraph() const noexcept { return stopEventGraph; } inline DynamicTransferGraph& getStopEventGraph() noexcept { return stopEventGraph; } private: const Data& data; DynamicTransferGraph stopEventGraph; }; }
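/* For reference: computeShortcuts() above follows a common OpenMP pattern —
   each thread collects results into a private container and merges it into
   the shared vector inside "#pragma omp critical", keeping the expensive
   search itself lock-free. A minimal C sketch of that pattern (all names
   illustrative; error handling elided): */
#include <stdlib.h>
#include <string.h>

typedef struct { int origin, destination; } shortcut_t;

typedef struct {
    shortcut_t *data;
    size_t      size, capacity;
} shortcut_vec_t;

/* Append a thread's locally collected results to the shared vector under
   the lock; the per-thread search work stays outside the critical section. */
static void merge_local_results(shortcut_vec_t *shared,
                                const shortcut_t *local, size_t n)
{
#pragma omp critical
    {
        if (shared->size + n > shared->capacity) {
            shared->capacity = 2 * (shared->size + n);
            shared->data = realloc(shared->data,
                                   shared->capacity * sizeof(shortcut_t));
        }
        memcpy(shared->data + shared->size, local, n * sizeof(shortcut_t));
        shared->size += n;
    }
}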
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) { for (t4=max(max(ceild(t1-14,16),ceild(8*t2-Nz-51,64)),ceild(24*t3-Ny-51,64));t4<=min(min(floord(4*Nt+Nx-9,64),floord(4*t1+Nx-1,64)),floord(24*t3+Nx+11,64));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),16*t4+14);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) { lbv=max(64*t4,4*t5+4); ubv=min(64*t4+63,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
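/* For reference: the time-tiled CLooG loop nest above computes, per time
   step, the same update as this naive radius-4 stencil. The coefficient
   layout follows the code: coef[0] at the centre, then coef[3*(r-1)+1 ..
   3*(r-1)+3] for the +/-r neighbours along the first, second and third
   axes, r = 1..4 (25 points in total). Minimal sketch, same array layout: */
static void stencil_25pt_reference(double ****A, double ****coef,
                                   int t, int Nz, int Ny, int Nx)
{
    for (int i = 4; i < Nz - 4; i++)
        for (int j = 4; j < Ny - 4; j++)
            for (int k = 4; k < Nx - 4; k++) {
                double v = coef[0][i][j][k] * A[t % 2][i][j][k];
                for (int r = 1; r <= 4; r++) {
                    v += coef[3*(r-1)+1][i][j][k]
                         * (A[t % 2][i-r][j][k] + A[t % 2][i+r][j][k]);
                    v += coef[3*(r-1)+2][i][j][k]
                         * (A[t % 2][i][j-r][k] + A[t % 2][i][j+r][k]);
                    v += coef[3*(r-1)+3][i][j][k]
                         * (A[t % 2][i][j][k-r] + A[t % 2][i][j][k+r]);
                }
                A[(t + 1) % 2][i][j][k] = v;
            }
}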
pyfr_gemm_rm.c
/****************************************************************************** ** Copyright (c) 2016-2019, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <mkl.h> #include <libxsmm.h> static double sec(struct timeval start, struct timeval end) { return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6; } int main(int argc, char *argv[]) { int n,m,k; int lda,ldb,ldc; double* a; double* b; double* c1; double* c2; struct timeval l_start, l_end; double l_total = 0.0; int reps, i, j; const int nblock = 16; double alpha = 1.0, beta = 1.0; char transa = 'N', transb = 'N'; libxsmm_gemm_prefetch_type l_prefetch_op = LIBXSMM_PREFETCH_NONE; libxsmm_dmmfunction kernel = NULL; if (argc != 5) { fprintf(stderr, "Invalid ./a,out M N K reps\n"); exit(-1); } m = atoi(argv[1]); n = atoi(argv[2]); k = atoi(argv[3]); reps = atoi(argv[4]); /* this is col-major what you want to use for the sizes in question */ lda = k; ldb = n; ldc = n; if (n % nblock != 0) { fprintf(stderr, "N needs to be divisable by %i\n", nblock); exit(-1); } a = (double*)_mm_malloc(lda*m*sizeof(double), 64); b = (double*)_mm_malloc(ldb*k*sizeof(double), 64); c1 = (double*)_mm_malloc(ldc*m*sizeof(double), 64); c2 = (double*)_mm_malloc(ldc*m*sizeof(double), 64); #pragma omp parallel for for (i = 0; i < lda*m; i++) { a[i] = libxsmm_rng_f64(); } #pragma omp parallel for for (i = 0; i < ldb*k; i++) { b[i] = libxsmm_rng_f64(); } #pragma omp parallel for for (i = 0; i < ldc*m; i++) { c1[i] = 0; c2[i] = 0; } /* JIT Kernel */ kernel = libxsmm_dmmdispatch(nblock, m, k, &ldb, &lda, &ldc, NULL, NULL, NULL, &l_prefetch_op ); if (kernel == 0) { printf("JIT failed, exiting\n"); exit(-1); } /* init MKL */ dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc); #pragma omp parallel for for (i = 0; i < ldc*m; i++) { c1[i] = 0; c2[i] = 0; } gettimeofday(&l_start, NULL); for ( j = 0; j < reps; j++ ) { dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc); } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps ); fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total ); gettimeofday(&l_start, NULL); for ( j = 0; j < reps; j++ ) { #pragma omp parallel for private(i) for ( i = 0; i < n; i+=nblock) { kernel( b+i, a, c2+i, NULL, NULL, NULL ); } gettimeofday(&l_end, NULL); } l_total = sec(l_start, l_end); fprintf(stdout, "time[s] libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps ); fprintf(stdout, "GFLOPS libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total ); /* test result */ double max_error = 0.0; for ( i = 0; i < ldc*m; i++) { if (max_error < fabs(c1[i] - c2[i])) { max_error = fabs(c1[i] - c2[i]); } } printf("max error: %f\n\n", max_error); }
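/* For reference: the dgemm call above evaluates a row-major C = A*B with a
   column-major BLAS via the identity (A*B)^T = B^T * A^T. A row-major M x N
   buffer is bit-identical to a column-major N x M buffer, so swapping the
   operand order and the M/N dimensions yields the row-major product with no
   explicit transpose:

     row-major A (M x K, lda = K)  ==  column-major K x M view, i.e. A^T
     row-major B (K x N, ldb = N)  ==  column-major N x K view, i.e. B^T
     dgemm('N','N', n, m, k, alpha, B, ldb, A, lda, beta, C, ldc = n)
       -> column-major n x m result B^T * A^T = (A*B)^T,
          which reinterpreted row-major is the desired M x N matrix A*B. */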
hermm_c_dia_n_hi_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include <memory.h> #include <stdlib.h> alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Number* Y = &y[index2(cc,0,ldy)]; for (ALPHA_INT i = 0; i < mat->rows; i++) alpha_mul(Y[i],Y[i],beta); const ALPHA_Number* X = &x[index2(cc,0,ldx)]; for(ALPHA_INT di = 0; di < mat->ndiag;++di){ ALPHA_INT d = mat->distance[di]; if(d > 0){ ALPHA_INT ars = alpha_max(0,-d); ALPHA_INT acs = alpha_max(0,d); ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs); for(ALPHA_INT i = 0; i < an; ++i){ ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Complex val,val_c; alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha); alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha); alpha_madde(Y[ar],val,X[ac]); alpha_madde(Y[ac],val_c,X[ar]); } } if(d == 0){ for(ALPHA_INT r = 0; r < mat->rows; ++r){ ALPHA_Number val; alpha_mul(val,mat->values[index2(di,r,mat->lval)],alpha); alpha_madde(Y[r],val,X[r]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
occupancyGrid.h
/* * Generate 3D occupancy grid from a 3D closed mesh * by R. Falque * 07/02/2020 */ #ifndef OCCUPANCY_GRID_H #define OCCUPANCY_GRID_H #include <Eigen/Core> #include <unsupported/Eigen/CXX11/Tensor> #include <vector> #include <string> #include <iostream> #include <sstream> #include <fstream> #include "EigenTools/getMinMax.h" #include "EigenTools/nanoflannWrapper.h" #include "sgn.h" #include "IO/writePNG.h" #include "IO/process_folder.h" // polyscope wrapper class OccupancyGrid { private: Eigen::MatrixXd vertices_; Eigen::MatrixXd normals_; int grid_resolution_; double bounding_box_scale_; Eigen::Tensor<bool, 3> occupancy_grid_; double grid_size_; Eigen::Vector3d source_; public: OccupancyGrid(Eigen::MatrixXd & vertices, Eigen::MatrixXd & normals, int grid_resolution, double bounding_box_scale) { // store variables in private variables vertices_ = vertices; normals_ = normals; grid_resolution_ = grid_resolution; bounding_box_scale_ = bounding_box_scale; // create the occupancy grid init(); } // destructor ~OccupancyGrid() { } //accessors inline Eigen::Tensor<bool, 3> get_occupancy_grid(){return occupancy_grid_;}; inline double get_grid_size(){return grid_size_;}; inline Eigen::Vector3d get_source(){return source_;}; // Class functions // create the occupancy grid void init() { Eigen::Vector3d min_point, max_point; getMinMax(vertices_, min_point, max_point); //double bounding_box_size = (max_point - min_point).norm() * bounding_box_scale; double bounding_box_size = (max_point - min_point).maxCoeff() * bounding_box_scale_; // diagonal versus max direction double leaf_size = bounding_box_size/(grid_resolution_-1); double inv_leaf_size = 1.0/leaf_size; Eigen::Vector3i min_box, max_box, number_of_bins; min_box << floor(min_point(0)*inv_leaf_size), floor(min_point(1)*inv_leaf_size) , floor(min_point(2)*inv_leaf_size); max_box << floor(max_point(0)*inv_leaf_size), floor(max_point(1)*inv_leaf_size) , floor(max_point(2)*inv_leaf_size); number_of_bins << max_box(0) - min_box(0) + 1, max_box(1) - min_box(1) + 1, max_box(2) - min_box(2) + 1; occupancy_grid_.resize(number_of_bins(0), number_of_bins(1), number_of_bins(2)); nanoflann_wrapper tree(vertices_); for (int x = 0; x < number_of_bins(0); ++x) for (int y = 0; y < number_of_bins(1); ++y) { #pragma omp parallel for for (int z = 0; z < number_of_bins(2); ++z) { std::vector< int > closest_point; Eigen::Vector3d point; point << x, y, z; point *= leaf_size; point += min_point; closest_point = tree.return_k_closest_points(point, 1); /* produce the outer shell only remove the next line if ( (point - vertices.row(closest_point[0]).transpose()).norm() < leaf_size(0)*2 ) grid(x, y, z) = true; else grid(x, y, z) = false; */ // here is the key function occupancy_grid_(x, y, z) = is_positive( ( vertices_.col(closest_point[0]) - point ).dot( normals_.col(closest_point[0]) ) ); } } grid_size_ = leaf_size; source_ = min_point; } // build a graph from the occupied space (there is no garanty of connectivity) inline bool generate_graph(Eigen::MatrixXd & vertices, Eigen::MatrixXi & edges) { std::vector< Eigen::Vector3d > vertices_vector; std::vector< Eigen::Vector2i > edges_vector; Eigen::Vector3d centroid; Eigen::Tensor<int, 3> grid_indices(occupancy_grid_.dimension(0), occupancy_grid_.dimension(1), occupancy_grid_.dimension(2)); // build vertices for (int x = 0; x < occupancy_grid_.dimension(0); ++x) for (int y = 0; y < occupancy_grid_.dimension(1); ++y) for (int z = 0; z < occupancy_grid_.dimension(2); ++z) { grid_indices(x, y, z) = -1; if (occupancy_grid_(x, y, z) 
== 1) { centroid << x, y, z; centroid *= grid_size_; centroid += source_; vertices_vector.push_back(centroid); grid_indices(x, y, z) = vertices_vector.size() - 1; } } // build edges for (int x = 0; x < occupancy_grid_.dimension(0); ++x) for (int y = 0; y < occupancy_grid_.dimension(1); ++y) for (int z = 0; z < occupancy_grid_.dimension(2); ++z) if (occupancy_grid_(x, y, z) == 1) { // case on x if (x-1>=0) if (occupancy_grid_(x-1, y, z)==1) { Eigen::Vector2i edge_temp; edge_temp << grid_indices(x, y, z), grid_indices(x-1, y, z); edges_vector.push_back(edge_temp); } if (x+1<occupancy_grid_.dimension(0)) if (occupancy_grid_(x+1, y, z)==1) { Eigen::Vector2i edge_temp; edge_temp << grid_indices(x, y, z), grid_indices(x+1, y, z); edges_vector.push_back(edge_temp); } // case on y if (y-1>=0) if (occupancy_grid_(x, y-1, z)==1) { Eigen::Vector2i edge_temp; edge_temp << grid_indices(x, y, z), grid_indices(x, y-1, z); edges_vector.push_back(edge_temp); } if (y+1<occupancy_grid_.dimension(1)) if (occupancy_grid_(x, y+1, z)==1) { Eigen::Vector2i edge_temp; edge_temp << grid_indices(x, y, z), grid_indices(x, y+1, z); edges_vector.push_back(edge_temp); } // case on z if (z-1>=0) if (occupancy_grid_(x, y, z-1)==1) { Eigen::Vector2i edge_temp; edge_temp << grid_indices(x, y, z), grid_indices(x, y, z-1); edges_vector.push_back(edge_temp); } if (z+1<occupancy_grid_.dimension(2)) if (occupancy_grid_(x, y, z+1)==1) { Eigen::Vector2i edge_temp; edge_temp << grid_indices(x, y, z), grid_indices(x, y, z+1); edges_vector.push_back(edge_temp); } } vertices.resize(3, vertices_vector.size()); for (int i=0; i< vertices_vector.size(); i++) vertices.col(i) = vertices_vector[i]; edges.resize(2, edges_vector.size()); for (int i=0; i< edges_vector.size(); i++) edges.col(i) = edges_vector[i]; return true; }; // print each slice as an image in the provided folder path inline bool print_to_folder(std::string folder_name) { bool folder_exist = does_folder_exist(folder_name); if (!folder_exist) { std::cout << "Error: the folder does not exist\n"; create_folder(folder_name); } empty_folder(folder_name); #pragma omp parallel for for (int i=0; i<occupancy_grid_.dimension(2); i++) { Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic> slice; Eigen::Tensor<bool, 2> tensor_slice; Eigen::array<long int,3> offset = {0,0,i}; //Starting point Eigen::array<long int,3> extent = {occupancy_grid_.dimension(0),occupancy_grid_.dimension(1),0}; //Finish point tensor_slice = occupancy_grid_.slice(offset, extent).reshape(Eigen::array<long int,2>{occupancy_grid_.dimension(0),occupancy_grid_.dimension(1)}); slice = Eigen::Map<const Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic>> (tensor_slice.data(), tensor_slice.dimension(0),tensor_slice.dimension(1)); std::stringstream ss; ss << std::setw(3) << std::setfill('0') << i; std::string s = ss.str(); std::string file_name = folder_name + s + ".png"; writePNG(slice, file_name); } std::cout << "Progress: Stack of images written in: " << folder_name << std::endl; return true; }; // print the occupancy grid into a yaml file inline bool print_to_yaml(std::string filename) { std::string chunk_name = filename + ".yaml"; std::ofstream out_file(chunk_name); out_file << "? 
''\n"; out_file << ": - size: !list_int\n"; out_file << " - " + std::to_string(32) + "\n"; out_file << " - " + std::to_string(32) + "\n"; out_file << " - " + std::to_string(32) + "\n"; out_file << " - entities: !list_end []\n"; out_file << " - blocks: !list_compound\n"; for (int x = 0; x < occupancy_grid_.dimension(0); ++x) for (int y = 0; y < occupancy_grid_.dimension(1); ++y) for (int z = 0; z < occupancy_grid_.dimension(2); ++z) { if (occupancy_grid_(x, y, z) == 1) { out_file << " - - pos: !list_int\n"; out_file << " - " + std::to_string(x-round(occupancy_grid_.dimension(0)/2)) + "\n"; out_file << " - " + std::to_string(z) + "\n"; out_file << " - " + std::to_string(-y+round(occupancy_grid_.dimension(1)/2)) + "\n"; out_file << " - state: 0\n"; } } out_file << " - author: rFalque\n"; out_file << " - palette: !list_compound\n"; out_file << " - - Properties:\n"; out_file << " - variant: smooth_andesite\n"; out_file << " - Name: minecraft:stone\n"; out_file << " - DataVersion: 2227\n"; out_file.close(); return true; }; inline bool generate_mesh(Eigen::MatrixXd & vertices, Eigen::MatrixXi & faces) { std::vector< Eigen::Vector3d > vertices_vector; std::vector< Eigen::Vector3i > faces_vector; Eigen::Vector3d centroid; for (int x = 0; x < occupancy_grid_.dimension(0); ++x) for (int y = 0; y < occupancy_grid_.dimension(1); ++y) for (int z = 0; z < occupancy_grid_.dimension(2); ++z) { if (occupancy_grid_(x, y, z) == 1) { centroid << x, y, z; centroid *= grid_size_; centroid += source_; make_cube(centroid, grid_size_, vertices_vector, faces_vector); } } vertices.resize(3, vertices_vector.size()); for (int i=0; i< vertices_vector.size(); i++) vertices.col(i) = vertices_vector[i]; faces.resize(3, faces_vector.size()); for (int i=0; i< faces_vector.size(); i++) faces.col(i) = faces_vector[i]; return true; }; inline bool make_cube(Eigen::Vector3d centroid, double size, std::vector< Eigen::Vector3d > & vertices_vector, std::vector< Eigen::Vector3i > & faces_vector) { Eigen::Vector3i vertices_offset = Eigen::Vector3i::Constant(vertices_vector.size()); Eigen::Vector3d v_0 ( 0.5, 0.5, 0.5); Eigen::Vector3d v_1 ( 0.5, 0.5,-0.5); Eigen::Vector3d v_2 ( 0.5,-0.5, 0.5); Eigen::Vector3d v_3 ( 0.5,-0.5,-0.5); Eigen::Vector3d v_4 (-0.5, 0.5, 0.5); Eigen::Vector3d v_5 (-0.5, 0.5,-0.5); Eigen::Vector3d v_6 (-0.5,-0.5, 0.5); Eigen::Vector3d v_7 (-0.5,-0.5,-0.5); vertices_vector.push_back( v_0*size + centroid ); vertices_vector.push_back( v_1*size + centroid ); vertices_vector.push_back( v_2*size + centroid ); vertices_vector.push_back( v_3*size + centroid ); vertices_vector.push_back( v_4*size + centroid ); vertices_vector.push_back( v_5*size + centroid ); vertices_vector.push_back( v_6*size + centroid ); vertices_vector.push_back( v_7*size + centroid ); Eigen::Vector3i f_0 (0, 3, 1); Eigen::Vector3i f_1 (0, 2, 3); Eigen::Vector3i f_2 (0, 1, 5); Eigen::Vector3i f_3 (0, 5, 4); Eigen::Vector3i f_4 (4, 5, 7); Eigen::Vector3i f_5 (4, 7, 6); Eigen::Vector3i f_6 (2, 6, 7); Eigen::Vector3i f_7 (2, 7, 3); Eigen::Vector3i f_8 (1, 3, 7); Eigen::Vector3i f_9 (1, 7, 5); Eigen::Vector3i f_10(0, 4, 2); Eigen::Vector3i f_11(2, 4, 6); faces_vector.push_back(f_0 + vertices_offset); faces_vector.push_back(f_1 + vertices_offset); faces_vector.push_back(f_2 + vertices_offset); faces_vector.push_back(f_3 + vertices_offset); faces_vector.push_back(f_4 + vertices_offset); faces_vector.push_back(f_5 + vertices_offset); faces_vector.push_back(f_6 + vertices_offset); faces_vector.push_back(f_7 + vertices_offset); faces_vector.push_back(f_8 + 
vertices_offset); faces_vector.push_back(f_9 + vertices_offset); faces_vector.push_back(f_10 + vertices_offset); faces_vector.push_back(f_11 + vertices_offset); return true; }; }; #endif
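/* For reference: the occupancy test in init() above classifies a grid point
   as "inside" when the vector from the point to its nearest mesh vertex has
   a positive dot product with that vertex's outward normal. A minimal C
   sketch of the same test (nearest-neighbour search elided): */
#include <stdbool.h>

/* p: query point; v: nearest mesh vertex; n: outward normal at v. */
static bool point_inside_mesh(const double p[3], const double v[3],
                              const double n[3])
{
    double d = (v[0] - p[0]) * n[0]
             + (v[1] - p[1]) * n[1]
             + (v[2] - p[2]) * n[2];
    return d > 0.0;  /* surface faces away from the point: it lies inside */
}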
tls_test_c.c
/* tls_test_c.c -- test TLS common symbol Copyright (C) 2008-2020 Free Software Foundation, Inc. Written by Ian Lance Taylor <iant@google.com> This file is part of gold. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* The only way I know to get gcc to generate a TLS common symbol is to use a C file and an OpenMP directive. */ #include "config.h" #include <stdio.h> #define CHECK_EQ_OR_RETURN(var, expected) \ do \ { \ if ((var) != (expected)) \ { \ printf(#var ": expected %d, found %d\n", expected, var); \ return 0; \ } \ } \ while (0) #ifdef HAVE_OMP_SUPPORT int v7; #pragma omp threadprivate (v7) #endif int t11(void); int t11_last(void); int t11(void) { #ifdef HAVE_OMP_SUPPORT CHECK_EQ_OR_RETURN(v7, 0); v7 = 70; #endif return 1; } int t11_last(void) { #ifdef HAVE_OMP_SUPPORT CHECK_EQ_OR_RETURN(v7, 70); #endif return 1; }
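/* For reference: #pragma omp threadprivate, as used above, gives every
   thread its own copy of the variable, which is what forces the compiler
   to emit it as a TLS (common) symbol. A minimal sketch of the semantics: */
#include <stdio.h>

int counter;                   /* one instance per thread, not shared */
#pragma omp threadprivate(counter)

int threadprivate_demo(void)
{
#pragma omp parallel
    {
        counter++;                            /* no race: private copy  */
        printf("counter = %d\n", counter);    /* every thread prints 1  */
    }
    return 0;
}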
generator.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef GENERATOR_H_ #define GENERATOR_H_ #include <algorithm> #include <cinttypes> #include <random> #include "graph.h" #include "pvector.h" #include "util.h" /* GAP Benchmark Suite Class: Generator Author: Scott Beamer Given scale and degree, generates edgelist for synthetic graph - Intended to be called from Builder - GenerateEL(uniform) generates and returns the edgelist - Can generate uniform random (uniform=true) or R-MAT graph according to Graph500 parameters (uniform=false) - Can also randomize weights within a weighted edgelist (InsertWeights) - Blocking/reseeding is for parallelism with deterministic output edgelist */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_> class Generator { typedef EdgePair<NodeID_, DestID_> Edge; typedef EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> WEdge; typedef pvector<Edge> EdgeList; public: Generator(int scale, int degree) { scale_ = scale; num_nodes_ = 1l << scale; num_edges_ = num_nodes_ * degree; if (num_nodes_ > std::numeric_limits<NodeID_>::max()) { std::cout << "NodeID type (max: " << std::numeric_limits<NodeID_>::max(); std::cout << ") too small to hold " << num_nodes_ << std::endl; std::cout << "Recommend changing NodeID (typedef'd in src/benchmark.h)"; std::cout << " to a wider type and recompiling" << std::endl; std::exit(-31); } } void PermuteIDs(EdgeList &el) { pvector<NodeID_> permutation(num_nodes_); std::mt19937 rng(kRandSeed); #pragma omp parallel for for (NodeID_ n = 0; n < num_nodes_; n++) permutation[n] = n; shuffle(permutation.begin(), permutation.end(), rng); #pragma omp parallel for for (int64_t e = 0; e < num_edges_; e++) el[e] = Edge(permutation[el[e].u], permutation[el[e].v]); } EdgeList MakeUniformEL() { EdgeList el(num_edges_); #pragma omp parallel { std::mt19937 rng; std::uniform_int_distribution<NodeID_> udist(0, num_nodes_ - 1); #pragma omp for for (int64_t block = 0; block < num_edges_; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, num_edges_); e++) { el[e] = Edge(udist(rng), udist(rng)); } } } return el; } EdgeList MakeRMatEL() { const float A = 0.57f, B = 0.19f, C = 0.19f; EdgeList el(num_edges_); #pragma omp parallel { std::mt19937 rng; std::uniform_real_distribution<float> udist(0, 1.0f); #pragma omp for for (int64_t block = 0; block < num_edges_; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, num_edges_); e++) { NodeID_ src = 0, dst = 0; for (int depth = 0; depth < scale_; depth++) { float rand_point = udist(rng); src = src << 1; dst = dst << 1; if (rand_point < A + B) { if (rand_point > A) dst++; } else { src++; if (rand_point > A + B + C) dst++; } } el[e] = Edge(src, dst); } } } PermuteIDs(el); // TIME_PRINT("Shuffle", std::shuffle(el.begin(), el.end(), // std::mt19937())); return el; } EdgeList GenerateEL(bool uniform) { EdgeList el; Timer t; t.Start(); if (uniform) el = MakeUniformEL(); else el = MakeRMatEL(); t.Stop(); // PrintTime("Generate Time", t.Seconds()); return el; } static void InsertWeights(pvector<EdgePair<NodeID_, NodeID_>> &el) {} // Overwrites existing weights with random from [1,255] static void InsertWeights(pvector<WEdge> &el) { #pragma omp parallel { std::mt19937 rng; std::uniform_int_distribution<int> udist(1, 255); int64_t el_size = el.size(); #pragma omp for for (int64_t block = 0; 
block < el_size; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, el_size); e++) { // el[e].v.w = static_cast<WeightT_>(udist(rng)); el[e].v.w = static_cast<WeightT_>(1); } } } } private: int scale_; int64_t num_nodes_; int64_t num_edges_; static const int64_t block_size = 1 << 18; }; #endif // GENERATOR_H_
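/* For reference: the generators above reseed the PRNG once per fixed-size
   block (seed = kRandSeed + block / block_size), so the output is identical
   no matter how OpenMP assigns blocks to threads. A minimal C sketch of that
   determinism pattern (rand_r stands in for std::mt19937; the seed constant
   is illustrative): */
#include <stdlib.h>

#define BLOCK_SIZE (1L << 18)
#define BASE_SEED  12345u   /* illustrative; any fixed seed works */

void fill_deterministic(unsigned *out, long n)
{
#pragma omp parallel for
    for (long block = 0; block < n; block += BLOCK_SIZE) {
        unsigned seed = BASE_SEED + (unsigned)(block / BLOCK_SIZE);
        long end = block + BLOCK_SIZE < n ? block + BLOCK_SIZE : n;
        for (long e = block; e < end; e++)
            out[e] = rand_r(&seed);  /* stream depends only on block id */
    }
}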
main.c
/// /// @copyright Copyright (c) 2016-, Issam SAID <said.issam@gmail.com> /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// 1. Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// 2. Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// 3. Neither the name of the copyright holder nor the names of its /// contributors may be used to endorse or promote products derived from /// this software without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, /// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT /// HOLDER OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, /// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, /// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR /// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF /// LIABILITY, WETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING /// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS /// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /// /// @file stencil_3d/main.c /// @author Issam SAID /// @brief An example of 3D finite difference stencil code based on /// the ezcu C/C++ interface. /// #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include <ezcu/ezcu.h> #define N 320 /// /// @brief The main program of the ezcu based sgemm C/C++ example. /// /// This is the main routine that shows how to use the ezcu C/C++ interface /// to implement a simple matrix to matrix multiplication. /// Note that the CUDA kernel is implemented in a seperate file (sgemm.cl). /// @return Error code if any. /// int main(void) { unsigned int i; float *u; float *v; float *coeffs; unsigned int grid[3] = {N/16, N/16, 1}; unsigned int block[3] = {16, 16, 1}; ezcu_dev_t device; fprintf(stdout, "... start of the ezcu 3D stencil C/C++ example\n"); /// ///< Initialize ezcu with selecting the default GPU. /// ezcu_init(); /// ///< Load the CUDA kernel that runs the stencil computations. /// ezcu_load(PREFIX"/stencil_3d.cu", NULL); /// ///< Get a pointer to the desired device (in this case the default GPU). /// device = ezcu_dev_find(0); u = (float*)malloc((N+8)*(N+8)*(N+8)*sizeof(float)); v = (float*)malloc((N+8)*(N+8)*(N+8)*sizeof(float)); coeffs = (float*)malloc(5*sizeof(float)); memset(u, 0, (N+8)*(N+8)*(N+8)*sizeof(float)); srand (time(NULL)); #pragma omp parallel for private(i) for (i = 0; i< (N+8)*(N+8)*(N+8); ++i) v[i] = (float)(i%100); coeffs[0] = -2.6; coeffs[1] = 1.6; coeffs[2] = -0.2; coeffs[3] = 0.02; coeffs[4] = -0.001; /// ///< Wrap the matrices into ezcu memory objects. /// ezcu_mem_wrap(device, u, (N+8)*(N+8)*(N+8), FLOAT | READ_WRITE | HWA); ezcu_mem_wrap(device, v, (N+8)*(N+8)*(N+8), FLOAT | READ_WRITE | HWA); ezcu_mem_wrap(device, coeffs, 5, FLOAT | READ_ONLY | HWA); /// ///< Set the work size an dimension of the kernel. /// ezcu_knl_set_wrk("stencil_3d", 2, grid, block); /// ///< Run the kernel on the default GPU. 
/// ezcu_knl_run("stencil_3d", device, v, u, coeffs, N, N, N); /// ///< Update the u buffer on the CPU side so that the results can be seen ///< on the host side. /// ezcu_mem_update(u, READ_ONLY); free(u); free(v); free(coeffs); /// ///< Release ezcu resources. /// ezcu_release(); fprintf(stdout, "... end of the ezcu 3D stencil C/C++ example\n"); return EXIT_SUCCESS; }
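/* Editorial note: the stencil_3d.cu kernel the example loads is not
   reproduced in this listing. As a hedged host-side sketch only, the
   computation presumably resembles the following 8th-order (radius-4) 3D
   stencil over the (N+8)^3 padded grid, with coeffs[0..4] as the axial
   coefficients. The indexing macro, the loop bounds, and the treatment of
   the centre coefficient are assumptions, not the actual kernel. */
#include <stddef.h>

#define RADIUS 4  /* assumed halo width, matching the N+8 allocations above */
#define IDX(x, y, z, d) ((size_t)(z)*(d)*(d) + (size_t)(y)*(d) + (size_t)(x))

static void stencil_3d_reference(float *u, const float *v,
                                 const float *coeffs, int n) {
    const int d = n + 2*RADIUS; /* padded dimension */
    for (int z = RADIUS; z < n + RADIUS; ++z)
    for (int y = RADIUS; y < n + RADIUS; ++y)
    for (int x = RADIUS; x < n + RADIUS; ++x) {
        /* centre coefficient applied once per axis (assumption) */
        float s = 3.0f*coeffs[0]*v[IDX(x, y, z, d)];
        for (int k = 1; k <= RADIUS; ++k)
            s += coeffs[k]*(v[IDX(x+k, y, z, d)] + v[IDX(x-k, y, z, d)] +
                            v[IDX(x, y+k, z, d)] + v[IDX(x, y-k, z, d)] +
                            v[IDX(x, y, z+k, d)] + v[IDX(x, y, z-k, d)]);
        u[IDX(x, y, z, d)] = s;
    }
}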
GB_unop__tanh_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__tanh_fp32_fp32) // op(A') function: GB (_unop_tran__tanh_fp32_fp32) // C type: float // A type: float // cast: float cij = aij // unaryop: cij = tanhf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = tanhf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = tanhf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TANH || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__tanh_fp32_fp32) ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = tanhf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = tanhf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__tanh_fp32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
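// Editorial aside (not part of GraphBLAS): the generated file above applies
// the operator over two storage layouts -- dense (Ab == NULL) and bitmap.
// A hedged standalone sketch of that dichotomy, with apply_tanh as an
// illustrative name: when a bitmap Ab is present, the operator is applied
// only to positions whose bit is set, since the remaining positions hold no
// entry of the matrix.
#include <cmath>
#include <cstdint>

// Entrywise apply with an optional bitmap: Ab == nullptr means A is dense.
static void apply_tanh (float *Cx, const float *Ax,
                        const int8_t *Ab, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != nullptr && !Ab [p]) continue ;   // no entry at position p
        Cx [p] = std::tanh (Ax [p]) ;
    }
}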
tree-parloops.c
/* Loop autoparallelization.

   Copyright (C) 2006-2015 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr>
   Zdenek Dvorak <dvorakz@suse.cz> and Razya Ladelsky <razya@il.ibm.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "options.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "predict.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "stor-layout.h"
#include "tree-nested.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "tree-vectorizer.h"
#include "tree-hasher.h"
#include "tree-parloops.h"
#include "omp-low.h"
#include "tree-nested.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"

/* This pass tries to distribute iterations of loops into several threads.
   The implementation is straightforward -- for each loop we test whether its
   iterations are independent, and if that is the case (and some additional
   conditions regarding profitability and correctness are satisfied), we add
   GIMPLE_OMP_PARALLEL and GIMPLE_OMP_FOR codes and let omp expansion
   machinery do its job.

   Most of the complexity is in bringing the code into shape expected
   by the omp expanders:
   -- for GIMPLE_OMP_FOR, ensuring that the loop has only one induction
      variable and that the exit test is at the start of the loop body
   -- for GIMPLE_OMP_PARALLEL, replacing the references to local addressable
      variables by accesses through pointers, and breaking up ssa chains
      by storing the values incoming to the parallelized loop to a structure
      passed to the new function as an argument (something similar is done
      in omp gimplification, unfortunately only a small part of the code
      can be shared).

   TODO:
   -- if there are several parallelizable loops in a function, it may be
      possible to generate the threads just once (using synchronization to
      ensure that cross-loop dependences are obeyed).
   -- handling of common reduction patterns for outer loops.
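   As a hedged source-level illustration of the end result (an editorial
   sketch; the pass itself emits GIMPLE, not source), a countable loop such
   as

     for (i = 0; i < n; i++)
       a[i] = b[i] + c[i];

   is, once proven independent and profitable, executed as if it read

     #pragma omp parallel num_threads (n_threads)
     #pragma omp for schedule (static)
     for (i = 0; i < n; i++)
       a[i] = b[i] + c[i];

   where n_threads comes from -ftree-parallelize-loops=.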
   More info can also be found at http://gcc.gnu.org/wiki/AutoParInGCC  */
/*
  Reduction handling:
  currently we use vect_force_simple_reduction() to detect reduction patterns.
  The code transformation will be introduced by an example.

parloop
{
  int sum=1;

  for (i = 0; i < N; i++)
   {
    x[i] = i + 3;
    sum+=x[i];
   }
}

gimple-like code:
header_bb:

  # sum_29 = PHI <sum_11(5), 1(3)>
  # i_28 = PHI <i_12(5), 0(3)>
  D.1795_8 = i_28 + 3;
  x[i_28] = D.1795_8;
  sum_11 = D.1795_8 + sum_29;
  i_12 = i_28 + 1;
  if (N_6(D) > i_12)
    goto header_bb;

exit_bb:

  # sum_21 = PHI <sum_11(4)>
  printf (&"%d"[0], sum_21);

after reduction transformation (only relevant parts):

parloop
{

....

  # Storing the initial value given by the user.  #

  .paral_data_store.32.sum.27 = 1;

  #pragma omp parallel num_threads(4)

  #pragma omp for schedule(static)

  # The neutral element corresponding to the particular
  reduction's operation, e.g. 0 for PLUS_EXPR,
  1 for MULT_EXPR, etc. replaces the user's initial value.  #

  # sum.27_29 = PHI <sum.27_11, 0>

  sum.27_11 = D.1827_8 + sum.27_29;

  GIMPLE_OMP_CONTINUE

  # Adding this reduction phi is done at create_phi_for_local_result() #
  # sum.27_56 = PHI <sum.27_11, 0>

  GIMPLE_OMP_RETURN

  # Creating the atomic operation is done at
  create_call_for_reduction_1()  #

  #pragma omp atomic_load
  D.1839_59 = *&.paral_data_load.33_51->reduction.23;
  D.1840_60 = sum.27_56 + D.1839_59;
  #pragma omp atomic_store (D.1840_60);

  GIMPLE_OMP_RETURN

 # collecting the result after the join of the threads is done at
  create_loads_for_reductions().
  The value computed by the threads is loaded from the
  shared struct.  #

  .paral_data_load.33_52 = &.paral_data_store.32;
  sum_37 = .paral_data_load.33_52->sum.27;
  sum_43 = D.1795_41 + sum_37;

  exit bb:
  # sum_21 = PHI <sum_43, sum_26>
  printf (&"%d"[0], sum_21);

...

}
*/

/* Minimal number of iterations of a loop that should be executed in each
   thread.  */
#define MIN_PER_THREAD 100

/* Element of the hashtable, representing a reduction in the current loop.  */
struct reduction_info
{
  gimple reduc_stmt;            /* reduction statement.  */
  gimple reduc_phi;             /* The phi node defining the reduction.  */
  enum tree_code reduction_code;/* code for the reduction operation.  */
  unsigned reduc_version;       /* SSA_NAME_VERSION of original reduc_phi
                                   result.  */
  gphi *keep_res;               /* The PHI_RESULT of this phi is the resulting
                                   value of the reduction variable when exiting
                                   the loop.  */
  tree initial_value;           /* The initial value of the reduction var before
                                   entering the loop.  */
  tree field;                   /* the name of the field in the parloop data
                                   structure intended for reduction.  */
  tree init;                    /* reduction initialization value.  */
  gphi *new_phi;                /* (helper field) Newly created phi node whose
                                   result will be passed to the atomic
                                   operation.  Represents the local result each
                                   thread computed for the reduction
                                   operation.  */
};

/* Reduction info hashtable helpers.  */

struct reduction_hasher : typed_free_remove <reduction_info>
{
  typedef reduction_info value_type;
  typedef reduction_info compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Equality and hash functions for hashtab code.
*/ inline bool reduction_hasher::equal (const value_type *a, const compare_type *b) { return (a->reduc_phi == b->reduc_phi); } inline hashval_t reduction_hasher::hash (const value_type *a) { return a->reduc_version; } typedef hash_table<reduction_hasher> reduction_info_table_type; static struct reduction_info * reduction_phi (reduction_info_table_type *reduction_list, gimple phi) { struct reduction_info tmpred, *red; if (reduction_list->elements () == 0 || phi == NULL) return NULL; tmpred.reduc_phi = phi; tmpred.reduc_version = gimple_uid (phi); red = reduction_list->find (&tmpred); return red; } /* Element of hashtable of names to copy. */ struct name_to_copy_elt { unsigned version; /* The version of the name to copy. */ tree new_name; /* The new name used in the copy. */ tree field; /* The field of the structure used to pass the value. */ }; /* Name copies hashtable helpers. */ struct name_to_copy_hasher : typed_free_remove <name_to_copy_elt> { typedef name_to_copy_elt value_type; typedef name_to_copy_elt compare_type; static inline hashval_t hash (const value_type *); static inline bool equal (const value_type *, const compare_type *); }; /* Equality and hash functions for hashtab code. */ inline bool name_to_copy_hasher::equal (const value_type *a, const compare_type *b) { return a->version == b->version; } inline hashval_t name_to_copy_hasher::hash (const value_type *a) { return (hashval_t) a->version; } typedef hash_table<name_to_copy_hasher> name_to_copy_table_type; /* A transformation matrix, which is a self-contained ROWSIZE x COLSIZE matrix. Rather than use floats, we simply keep a single DENOMINATOR that represents the denominator for every element in the matrix. */ typedef struct lambda_trans_matrix_s { lambda_matrix matrix; int rowsize; int colsize; int denominator; } *lambda_trans_matrix; #define LTM_MATRIX(T) ((T)->matrix) #define LTM_ROWSIZE(T) ((T)->rowsize) #define LTM_COLSIZE(T) ((T)->colsize) #define LTM_DENOMINATOR(T) ((T)->denominator) /* Allocate a new transformation matrix. */ static lambda_trans_matrix lambda_trans_matrix_new (int colsize, int rowsize, struct obstack * lambda_obstack) { lambda_trans_matrix ret; ret = (lambda_trans_matrix) obstack_alloc (lambda_obstack, sizeof (struct lambda_trans_matrix_s)); LTM_MATRIX (ret) = lambda_matrix_new (rowsize, colsize, lambda_obstack); LTM_ROWSIZE (ret) = rowsize; LTM_COLSIZE (ret) = colsize; LTM_DENOMINATOR (ret) = 1; return ret; } /* Multiply a vector VEC by a matrix MAT. MAT is an M*N matrix, and VEC is a vector with length N. The result is stored in DEST which must be a vector of length M. */ static void lambda_matrix_vector_mult (lambda_matrix matrix, int m, int n, lambda_vector vec, lambda_vector dest) { int i, j; lambda_vector_clear (dest, m); for (i = 0; i < m; i++) for (j = 0; j < n; j++) dest[i] += matrix[i][j] * vec[j]; } /* Return true if TRANS is a legal transformation matrix that respects the dependence vectors in DISTS and DIRS. The conservative answer is false. "Wolfe proves that a unimodular transformation represented by the matrix T is legal when applied to a loop nest with a set of lexicographically non-negative distance vectors RDG if and only if for each vector d in RDG, (T.d >= 0) is lexicographically positive. i.e.: if and only if it transforms the lexicographically positive distance vectors to lexicographically positive vectors. Note that a unimodular matrix must transform the zero vector (and only it) to the zero vector." S.Muchnick. 
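   A worked instance (an editorial illustration, not part of the quote): for
   a single loop and the reversal transform T = [-1], a dependence with
   distance vector d = (1) (iteration i depends on iteration i-1) gives
   T.d = (-1), which is not lexicographically positive, so reversal is
   illegal.  If instead every distance vector maps to a lexicographically
   positive vector, the loop can be reversed, hence its iterations are
   independent; this is exactly how loop_parallel_p below uses this
   predicate.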
*/

static bool
lambda_transform_legal_p (lambda_trans_matrix trans,
			  int nb_loops,
			  vec<ddr_p> dependence_relations)
{
  unsigned int i, j;
  lambda_vector distres;
  struct data_dependence_relation *ddr;

  gcc_assert (LTM_COLSIZE (trans) == nb_loops
	      && LTM_ROWSIZE (trans) == nb_loops);

  /* When there are no dependences, the transformation is correct.  */
  if (dependence_relations.length () == 0)
    return true;

  ddr = dependence_relations[0];
  if (ddr == NULL)
    return true;

  /* When there is an unknown relation in the dependence_relations, we
     know that it is not worth looking at this loop nest: give up.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    return false;

  distres = lambda_vector_new (nb_loops);

  /* For each distance vector in the dependence graph.  */
  FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
    {
      /* Don't care about relations for which we know that there is no
	 dependence, nor about read-read (aka. input dependences):
	 these data accesses can happen in any order.  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known
	  || (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr))))
	continue;

      /* Conservatively answer: "this transformation is not valid".  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
	return false;

      /* If the dependence could not be captured by a distance vector,
	 conservatively answer that the transform is not valid.  */
      if (DDR_NUM_DIST_VECTS (ddr) == 0)
	return false;

      /* Compute trans.dist_vect.  */
      for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
	{
	  lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
				     DDR_DIST_VECT (ddr, j), distres);

	  if (!lambda_vector_lexico_pos (distres, nb_loops))
	    return false;
	}
    }
  return true;
}

/* Data dependency analysis.  Returns true if the iterations of LOOP
   are independent of each other (that is, if we can execute them
   in parallel).  */

static bool
loop_parallel_p (struct loop *loop, struct obstack * parloop_obstack)
{
  vec<ddr_p> dependence_relations;
  vec<data_reference_p> datarefs;
  lambda_trans_matrix trans;
  bool ret = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
  {
    fprintf (dump_file, "Considering loop %d\n", loop->num);
    if (!loop->inner)
      fprintf (dump_file, "loop is innermost\n");
    else
      fprintf (dump_file, "loop NOT innermost\n");
   }

  /* Check for problems with dependences.  If the loop can be reversed,
     the iterations are independent.  */
  auto_vec<loop_p, 3> loop_nest;
  datarefs.create (10);
  dependence_relations.create (100);
  if (! compute_data_dependences_for_loop (loop, true, &loop_nest, &datarefs,
					   &dependence_relations))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  FAILED: cannot analyze data dependencies\n");
      ret = false;
      goto end;
    }
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_data_dependence_relations (dump_file, dependence_relations);

  trans = lambda_trans_matrix_new (1, 1, parloop_obstack);
  LTM_MATRIX (trans)[0][0] = -1;

  if (lambda_transform_legal_p (trans, 1, dependence_relations))
    {
      ret = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  SUCCESS: may be parallelized\n");
    }
  else if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "  FAILED: data dependencies exist across iterations\n");

 end:
  free_dependence_relations (dependence_relations);
  free_data_refs (datarefs);

  return ret;
}

/* Return true when LOOP contains basic blocks marked with the
   BB_IRREDUCIBLE_LOOP flag.
*/ static inline bool loop_has_blocks_with_irreducible_flag (struct loop *loop) { unsigned i; basic_block *bbs = get_loop_body_in_dom_order (loop); bool res = true; for (i = 0; i < loop->num_nodes; i++) if (bbs[i]->flags & BB_IRREDUCIBLE_LOOP) goto end; res = false; end: free (bbs); return res; } /* Assigns the address of OBJ in TYPE to an ssa name, and returns this name. The assignment statement is placed on edge ENTRY. DECL_ADDRESS maps decls to their addresses that can be reused. The address of OBJ is known to be invariant in the whole function. Other needed statements are placed right before GSI. */ static tree take_address_of (tree obj, tree type, edge entry, int_tree_htab_type *decl_address, gimple_stmt_iterator *gsi) { int uid; tree *var_p, name, addr; gassign *stmt; gimple_seq stmts; /* Since the address of OBJ is invariant, the trees may be shared. Avoid rewriting unrelated parts of the code. */ obj = unshare_expr (obj); for (var_p = &obj; handled_component_p (*var_p); var_p = &TREE_OPERAND (*var_p, 0)) continue; /* Canonicalize the access to base on a MEM_REF. */ if (DECL_P (*var_p)) *var_p = build_simple_mem_ref (build_fold_addr_expr (*var_p)); /* Assign a canonical SSA name to the address of the base decl used in the address and share it for all accesses and addresses based on it. */ uid = DECL_UID (TREE_OPERAND (TREE_OPERAND (*var_p, 0), 0)); int_tree_map elt; elt.uid = uid; int_tree_map *slot = decl_address->find_slot (elt, INSERT); if (!slot->to) { if (gsi == NULL) return NULL; addr = TREE_OPERAND (*var_p, 0); const char *obj_name = get_name (TREE_OPERAND (TREE_OPERAND (*var_p, 0), 0)); if (obj_name) name = make_temp_ssa_name (TREE_TYPE (addr), NULL, obj_name); else name = make_ssa_name (TREE_TYPE (addr)); stmt = gimple_build_assign (name, addr); gsi_insert_on_edge_immediate (entry, stmt); slot->uid = uid; slot->to = name; } else name = slot->to; /* Express the address in terms of the canonical SSA name. */ TREE_OPERAND (*var_p, 0) = name; if (gsi == NULL) return build_fold_addr_expr_with_type (obj, type); name = force_gimple_operand (build_addr (obj, current_function_decl), &stmts, true, NULL_TREE); if (!gimple_seq_empty_p (stmts)) gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); if (!useless_type_conversion_p (type, TREE_TYPE (name))) { name = force_gimple_operand (fold_convert (type, name), &stmts, true, NULL_TREE); if (!gimple_seq_empty_p (stmts)) gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); } return name; } /* Callback for htab_traverse. Create the initialization statement for reduction described in SLOT, and place it at the preheader of the loop described in DATA. */ int initialize_reductions (reduction_info **slot, struct loop *loop) { tree init, c; tree bvar, type, arg; edge e; struct reduction_info *const reduc = *slot; /* Create initialization in preheader: reduction_variable = initialization value of reduction. */ /* In the phi node at the header, replace the argument coming from the preheader with the reduction initialization value. */ /* Create a new variable to initialize the reduction. 
*/
  type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  bvar = create_tmp_var (type, "reduction");

  c = build_omp_clause (gimple_location (reduc->reduc_stmt),
			OMP_CLAUSE_REDUCTION);
  OMP_CLAUSE_REDUCTION_CODE (c) = reduc->reduction_code;
  OMP_CLAUSE_DECL (c) = SSA_NAME_VAR (gimple_assign_lhs (reduc->reduc_stmt));

  init = omp_reduction_init (c, TREE_TYPE (bvar));
  reduc->init = init;

  /* Replace the argument representing the initialization value
     with the initialization value for the reduction (neutral
     element for the particular operation, e.g. 0 for PLUS_EXPR,
     1 for MULT_EXPR, etc).
     Keep the old value in a new variable "reduction_initial",
     that will be taken into consideration after the parallel
     computation is done.  */

  e = loop_preheader_edge (loop);
  arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
  /* Create new variable to hold the initial value.  */

  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
	   (reduc->reduc_phi, loop_preheader_edge (loop)), init);
  reduc->initial_value = arg;
  return 1;
}

struct elv_data
{
  struct walk_stmt_info info;
  edge entry;
  int_tree_htab_type *decl_address;
  gimple_stmt_iterator *gsi;
  bool changed;
  bool reset;
};

/* Eliminates references to local variables in *TP out of the single
   entry single exit region starting at DTA->ENTRY.  DECL_ADDRESS
   contains addresses of the references that had their address taken
   already.  If the expression is changed, CHANGED is set to true.
   Callback for walk_tree.  */

static tree
eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct elv_data *const dta = (struct elv_data *) data;
  tree t = *tp, var, addr, addr_type, type, obj;

  if (DECL_P (t))
    {
      *walk_subtrees = 0;

      if (!SSA_VAR_P (t) || DECL_EXTERNAL (t))
	return NULL_TREE;

      type = TREE_TYPE (t);
      addr_type = build_pointer_type (type);
      addr = take_address_of (t, addr_type, dta->entry, dta->decl_address,
			      dta->gsi);
      if (dta->gsi == NULL && addr == NULL_TREE)
	{
	  dta->reset = true;
	  return NULL_TREE;
	}

      *tp = build_simple_mem_ref (addr);

      dta->changed = true;
      return NULL_TREE;
    }

  if (TREE_CODE (t) == ADDR_EXPR)
    {
      /* ADDR_EXPR may appear in two contexts:
	 -- as a gimple operand, when the address taken is a function invariant
	 -- as gimple rhs, when the resulting address is not a function
	    invariant
	 We do not need to do anything special in the latter case (the base of
	 the memory reference whose address is taken may be replaced in the
	 DECL_P case).  The former case is more complicated, as we need to
	 ensure that the new address is still a gimple operand.  Thus, it
	 is not sufficient to replace just the base of the memory reference --
	 we need to move the whole computation of the address out of the
	 loop.  */
      if (!is_gimple_val (t))
	return NULL_TREE;

      *walk_subtrees = 0;
      obj = TREE_OPERAND (t, 0);
      var = get_base_address (obj);
      if (!var || !SSA_VAR_P (var) || DECL_EXTERNAL (var))
	return NULL_TREE;

      addr_type = TREE_TYPE (t);
      addr = take_address_of (obj, addr_type, dta->entry, dta->decl_address,
			      dta->gsi);
      if (dta->gsi == NULL && addr == NULL_TREE)
	{
	  dta->reset = true;
	  return NULL_TREE;
	}

      *tp = addr;

      dta->changed = true;
      return NULL_TREE;
    }

  if (!EXPR_P (t))
    *walk_subtrees = 0;

  return NULL_TREE;
}

/* Moves the references to local variables in STMT at *GSI out of the single
   entry single exit region starting at ENTRY.  DECL_ADDRESS contains
   addresses of the references that had their address taken already.
*/ static void eliminate_local_variables_stmt (edge entry, gimple_stmt_iterator *gsi, int_tree_htab_type *decl_address) { struct elv_data dta; gimple stmt = gsi_stmt (*gsi); memset (&dta.info, '\0', sizeof (dta.info)); dta.entry = entry; dta.decl_address = decl_address; dta.changed = false; dta.reset = false; if (gimple_debug_bind_p (stmt)) { dta.gsi = NULL; walk_tree (gimple_debug_bind_get_value_ptr (stmt), eliminate_local_variables_1, &dta.info, NULL); if (dta.reset) { gimple_debug_bind_reset_value (stmt); dta.changed = true; } } else if (gimple_clobber_p (stmt)) { stmt = gimple_build_nop (); gsi_replace (gsi, stmt, false); dta.changed = true; } else { dta.gsi = gsi; walk_gimple_op (stmt, eliminate_local_variables_1, &dta.info); } if (dta.changed) update_stmt (stmt); } /* Eliminates the references to local variables from the single entry single exit region between the ENTRY and EXIT edges. This includes: 1) Taking address of a local variable -- these are moved out of the region (and temporary variable is created to hold the address if necessary). 2) Dereferencing a local variable -- these are replaced with indirect references. */ static void eliminate_local_variables (edge entry, edge exit) { basic_block bb; auto_vec<basic_block, 3> body; unsigned i; gimple_stmt_iterator gsi; bool has_debug_stmt = false; int_tree_htab_type decl_address (10); basic_block entry_bb = entry->src; basic_block exit_bb = exit->dest; gather_blocks_in_sese_region (entry_bb, exit_bb, &body); FOR_EACH_VEC_ELT (body, i, bb) if (bb != entry_bb && bb != exit_bb) for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) if (is_gimple_debug (gsi_stmt (gsi))) { if (gimple_debug_bind_p (gsi_stmt (gsi))) has_debug_stmt = true; } else eliminate_local_variables_stmt (entry, &gsi, &decl_address); if (has_debug_stmt) FOR_EACH_VEC_ELT (body, i, bb) if (bb != entry_bb && bb != exit_bb) for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) if (gimple_debug_bind_p (gsi_stmt (gsi))) eliminate_local_variables_stmt (entry, &gsi, &decl_address); } /* Returns true if expression EXPR is not defined between ENTRY and EXIT, i.e. if all its operands are defined outside of the region. */ static bool expr_invariant_in_region_p (edge entry, edge exit, tree expr) { basic_block entry_bb = entry->src; basic_block exit_bb = exit->dest; basic_block def_bb; if (is_gimple_min_invariant (expr)) return true; if (TREE_CODE (expr) == SSA_NAME) { def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr)); if (def_bb && dominated_by_p (CDI_DOMINATORS, def_bb, entry_bb) && !dominated_by_p (CDI_DOMINATORS, def_bb, exit_bb)) return false; return true; } return false; } /* If COPY_NAME_P is true, creates and returns a duplicate of NAME. The copies are stored to NAME_COPIES, if NAME was already duplicated, its duplicate stored in NAME_COPIES is returned. Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also duplicated, storing the copies in DECL_COPIES. */ static tree separate_decls_in_region_name (tree name, name_to_copy_table_type *name_copies, int_tree_htab_type *decl_copies, bool copy_name_p) { tree copy, var, var_copy; unsigned idx, uid, nuid; struct int_tree_map ielt; struct name_to_copy_elt elt, *nelt; name_to_copy_elt **slot; int_tree_map *dslot; if (TREE_CODE (name) != SSA_NAME) return name; idx = SSA_NAME_VERSION (name); elt.version = idx; slot = name_copies->find_slot_with_hash (&elt, idx, copy_name_p ? 
INSERT : NO_INSERT); if (slot && *slot) return (*slot)->new_name; if (copy_name_p) { copy = duplicate_ssa_name (name, NULL); nelt = XNEW (struct name_to_copy_elt); nelt->version = idx; nelt->new_name = copy; nelt->field = NULL_TREE; *slot = nelt; } else { gcc_assert (!slot); copy = name; } var = SSA_NAME_VAR (name); if (!var) return copy; uid = DECL_UID (var); ielt.uid = uid; dslot = decl_copies->find_slot_with_hash (ielt, uid, INSERT); if (!dslot->to) { var_copy = create_tmp_var (TREE_TYPE (var), get_name (var)); DECL_GIMPLE_REG_P (var_copy) = DECL_GIMPLE_REG_P (var); dslot->uid = uid; dslot->to = var_copy; /* Ensure that when we meet this decl next time, we won't duplicate it again. */ nuid = DECL_UID (var_copy); ielt.uid = nuid; dslot = decl_copies->find_slot_with_hash (ielt, nuid, INSERT); gcc_assert (!dslot->to); dslot->uid = nuid; dslot->to = var_copy; } else var_copy = dslot->to; replace_ssa_name_symbol (copy, var_copy); return copy; } /* Finds the ssa names used in STMT that are defined outside the region between ENTRY and EXIT and replaces such ssa names with their duplicates. The duplicates are stored to NAME_COPIES. Base decls of all ssa names used in STMT (including those defined in LOOP) are replaced with the new temporary variables; the replacement decls are stored in DECL_COPIES. */ static void separate_decls_in_region_stmt (edge entry, edge exit, gimple stmt, name_to_copy_table_type *name_copies, int_tree_htab_type *decl_copies) { use_operand_p use; def_operand_p def; ssa_op_iter oi; tree name, copy; bool copy_name_p; FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF) { name = DEF_FROM_PTR (def); gcc_assert (TREE_CODE (name) == SSA_NAME); copy = separate_decls_in_region_name (name, name_copies, decl_copies, false); gcc_assert (copy == name); } FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE) { name = USE_FROM_PTR (use); if (TREE_CODE (name) != SSA_NAME) continue; copy_name_p = expr_invariant_in_region_p (entry, exit, name); copy = separate_decls_in_region_name (name, name_copies, decl_copies, copy_name_p); SET_USE (use, copy); } } /* Finds the ssa names used in STMT that are defined outside the region between ENTRY and EXIT and replaces such ssa names with their duplicates. The duplicates are stored to NAME_COPIES. Base decls of all ssa names used in STMT (including those defined in LOOP) are replaced with the new temporary variables; the replacement decls are stored in DECL_COPIES. 
*/ static bool separate_decls_in_region_debug (gimple stmt, name_to_copy_table_type *name_copies, int_tree_htab_type *decl_copies) { use_operand_p use; ssa_op_iter oi; tree var, name; struct int_tree_map ielt; struct name_to_copy_elt elt; name_to_copy_elt **slot; int_tree_map *dslot; if (gimple_debug_bind_p (stmt)) var = gimple_debug_bind_get_var (stmt); else if (gimple_debug_source_bind_p (stmt)) var = gimple_debug_source_bind_get_var (stmt); else return true; if (TREE_CODE (var) == DEBUG_EXPR_DECL || TREE_CODE (var) == LABEL_DECL) return true; gcc_assert (DECL_P (var) && SSA_VAR_P (var)); ielt.uid = DECL_UID (var); dslot = decl_copies->find_slot_with_hash (ielt, ielt.uid, NO_INSERT); if (!dslot) return true; if (gimple_debug_bind_p (stmt)) gimple_debug_bind_set_var (stmt, dslot->to); else if (gimple_debug_source_bind_p (stmt)) gimple_debug_source_bind_set_var (stmt, dslot->to); FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE) { name = USE_FROM_PTR (use); if (TREE_CODE (name) != SSA_NAME) continue; elt.version = SSA_NAME_VERSION (name); slot = name_copies->find_slot_with_hash (&elt, elt.version, NO_INSERT); if (!slot) { gimple_debug_bind_reset_value (stmt); update_stmt (stmt); break; } SET_USE (use, (*slot)->new_name); } return false; } /* Callback for htab_traverse. Adds a field corresponding to the reduction specified in SLOT. The type is passed in DATA. */ int add_field_for_reduction (reduction_info **slot, tree type) { struct reduction_info *const red = *slot; tree var = gimple_assign_lhs (red->reduc_stmt); tree field = build_decl (gimple_location (red->reduc_stmt), FIELD_DECL, SSA_NAME_IDENTIFIER (var), TREE_TYPE (var)); insert_field_into_struct (type, field); red->field = field; return 1; } /* Callback for htab_traverse. Adds a field corresponding to a ssa name described in SLOT. The type is passed in DATA. */ int add_field_for_name (name_to_copy_elt **slot, tree type) { struct name_to_copy_elt *const elt = *slot; tree name = ssa_name (elt->version); tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, SSA_NAME_IDENTIFIER (name), TREE_TYPE (name)); insert_field_into_struct (type, field); elt->field = field; return 1; } /* Callback for htab_traverse. A local result is the intermediate result computed by a single thread, or the initial value in case no iteration was executed. This function creates a phi node reflecting these values. The phi's result will be stored in NEW_PHI field of the reduction's data structure. */ int create_phi_for_local_result (reduction_info **slot, struct loop *loop) { struct reduction_info *const reduc = *slot; edge e; gphi *new_phi; basic_block store_bb; tree local_res; source_location locus; /* STORE_BB is the block where the phi should be stored. It is the destination of the loop exit. (Find the fallthru edge from GIMPLE_OMP_CONTINUE). */ store_bb = FALLTHRU_EDGE (loop->latch)->dest; /* STORE_BB has two predecessors. One coming from the loop (the reduction's result is computed at the loop), and another coming from a block preceding the loop, when no iterations are executed (the initial value should be taken). 
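   The phi created below therefore merges the neutral initialization value
   (reduc->init) on the edge bypassing the loop with the thread-local
   partial result on the fallthru edge.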
*/ if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch)) e = EDGE_PRED (store_bb, 1); else e = EDGE_PRED (store_bb, 0); local_res = copy_ssa_name (gimple_assign_lhs (reduc->reduc_stmt)); locus = gimple_location (reduc->reduc_stmt); new_phi = create_phi_node (local_res, store_bb); add_phi_arg (new_phi, reduc->init, e, locus); add_phi_arg (new_phi, gimple_assign_lhs (reduc->reduc_stmt), FALLTHRU_EDGE (loop->latch), locus); reduc->new_phi = new_phi; return 1; } struct clsn_data { tree store; tree load; basic_block store_bb; basic_block load_bb; }; /* Callback for htab_traverse. Create an atomic instruction for the reduction described in SLOT. DATA annotates the place in memory the atomic operation relates to, and the basic block it needs to be generated in. */ int create_call_for_reduction_1 (reduction_info **slot, struct clsn_data *clsn_data) { struct reduction_info *const reduc = *slot; gimple_stmt_iterator gsi; tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi)); tree load_struct; basic_block bb; basic_block new_bb; edge e; tree t, addr, ref, x; tree tmp_load, name; gimple load; load_struct = build_simple_mem_ref (clsn_data->load); t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE); addr = build_addr (t, current_function_decl); /* Create phi node. */ bb = clsn_data->load_bb; gsi = gsi_last_bb (bb); e = split_block (bb, gsi_stmt (gsi)); new_bb = e->dest; tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr))); tmp_load = make_ssa_name (tmp_load); load = gimple_build_omp_atomic_load (tmp_load, addr); SSA_NAME_DEF_STMT (tmp_load) = load; gsi = gsi_start_bb (new_bb); gsi_insert_after (&gsi, load, GSI_NEW_STMT); e = split_block (new_bb, load); new_bb = e->dest; gsi = gsi_start_bb (new_bb); ref = tmp_load; x = fold_build2 (reduc->reduction_code, TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref, PHI_RESULT (reduc->new_phi)); name = force_gimple_operand_gsi (&gsi, x, true, NULL_TREE, true, GSI_CONTINUE_LINKING); gsi_insert_after (&gsi, gimple_build_omp_atomic_store (name), GSI_NEW_STMT); return 1; } /* Create the atomic operation at the join point of the threads. REDUCTION_LIST describes the reductions in the LOOP. LD_ST_DATA describes the shared data structure where shared data is stored in and loaded from. */ static void create_call_for_reduction (struct loop *loop, reduction_info_table_type *reduction_list, struct clsn_data *ld_st_data) { reduction_list->traverse <struct loop *, create_phi_for_local_result> (loop); /* Find the fallthru edge from GIMPLE_OMP_CONTINUE. */ ld_st_data->load_bb = FALLTHRU_EDGE (loop->latch)->dest; reduction_list ->traverse <struct clsn_data *, create_call_for_reduction_1> (ld_st_data); } /* Callback for htab_traverse. Loads the final reduction value at the join point of all threads, and inserts it in the right place. 
*/ int create_loads_for_reductions (reduction_info **slot, struct clsn_data *clsn_data) { struct reduction_info *const red = *slot; gimple stmt; gimple_stmt_iterator gsi; tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt)); tree load_struct; tree name; tree x; gsi = gsi_after_labels (clsn_data->load_bb); load_struct = build_simple_mem_ref (clsn_data->load); load_struct = build3 (COMPONENT_REF, type, load_struct, red->field, NULL_TREE); x = load_struct; name = PHI_RESULT (red->keep_res); stmt = gimple_build_assign (name, x); gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); for (gsi = gsi_start_phis (gimple_bb (red->keep_res)); !gsi_end_p (gsi); gsi_next (&gsi)) if (gsi_stmt (gsi) == red->keep_res) { remove_phi_node (&gsi, false); return 1; } gcc_unreachable (); } /* Load the reduction result that was stored in LD_ST_DATA. REDUCTION_LIST describes the list of reductions that the loads should be generated for. */ static void create_final_loads_for_reduction (reduction_info_table_type *reduction_list, struct clsn_data *ld_st_data) { gimple_stmt_iterator gsi; tree t; gimple stmt; gsi = gsi_after_labels (ld_st_data->load_bb); t = build_fold_addr_expr (ld_st_data->store); stmt = gimple_build_assign (ld_st_data->load, t); gsi_insert_before (&gsi, stmt, GSI_NEW_STMT); reduction_list ->traverse <struct clsn_data *, create_loads_for_reductions> (ld_st_data); } /* Callback for htab_traverse. Store the neutral value for the particular reduction's operation, e.g. 0 for PLUS_EXPR, 1 for MULT_EXPR, etc. into the reduction field. The reduction is specified in SLOT. The store information is passed in DATA. */ int create_stores_for_reduction (reduction_info **slot, struct clsn_data *clsn_data) { struct reduction_info *const red = *slot; tree t; gimple stmt; gimple_stmt_iterator gsi; tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt)); gsi = gsi_last_bb (clsn_data->store_bb); t = build3 (COMPONENT_REF, type, clsn_data->store, red->field, NULL_TREE); stmt = gimple_build_assign (t, red->initial_value); gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); return 1; } /* Callback for htab_traverse. Creates loads to a field of LOAD in LOAD_BB and store to a field of STORE in STORE_BB for the ssa name and its duplicate specified in SLOT. */ int create_loads_and_stores_for_name (name_to_copy_elt **slot, struct clsn_data *clsn_data) { struct name_to_copy_elt *const elt = *slot; tree t; gimple stmt; gimple_stmt_iterator gsi; tree type = TREE_TYPE (elt->new_name); tree load_struct; gsi = gsi_last_bb (clsn_data->store_bb); t = build3 (COMPONENT_REF, type, clsn_data->store, elt->field, NULL_TREE); stmt = gimple_build_assign (t, ssa_name (elt->version)); gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); gsi = gsi_last_bb (clsn_data->load_bb); load_struct = build_simple_mem_ref (clsn_data->load); t = build3 (COMPONENT_REF, type, load_struct, elt->field, NULL_TREE); stmt = gimple_build_assign (elt->new_name, t); gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); return 1; } /* Moves all the variables used in LOOP and defined outside of it (including the initial values of loop phi nodes, and *PER_THREAD if it is a ssa name) to a structure created for this purpose. The code while (1) { use (a); use (b); } is transformed this way: bb0: old.a = a; old.b = b; bb1: a' = new->a; b' = new->b; while (1) { use (a'); use (b'); } `old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT. 
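   In source-level terms (a hedged sketch; the names are illustrative, not
   the pass's output), the outlined function receives a single pointer and
   unpacks it:

     struct paral_data { int a; int b; };

     static void
     loopfn (void *p)
     {
       struct paral_data *d = (struct paral_data *) p;
       int a1 = d->a, b1 = d->b;
       while (1) { use (a1); use (b1); }
     }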
The pointer `new' is intentionally not initialized (the loop will be split to a separate function later, and `new' will be initialized from its arguments). LD_ST_DATA holds information about the shared data structure used to pass information among the threads. It is initialized here, and gen_parallel_loop will pass it to create_call_for_reduction that needs this information. REDUCTION_LIST describes the reductions in LOOP. */ static void separate_decls_in_region (edge entry, edge exit, reduction_info_table_type *reduction_list, tree *arg_struct, tree *new_arg_struct, struct clsn_data *ld_st_data) { basic_block bb1 = split_edge (entry); basic_block bb0 = single_pred (bb1); name_to_copy_table_type name_copies (10); int_tree_htab_type decl_copies (10); unsigned i; tree type, type_name, nvar; gimple_stmt_iterator gsi; struct clsn_data clsn_data; auto_vec<basic_block, 3> body; basic_block bb; basic_block entry_bb = bb1; basic_block exit_bb = exit->dest; bool has_debug_stmt = false; entry = single_succ_edge (entry_bb); gather_blocks_in_sese_region (entry_bb, exit_bb, &body); FOR_EACH_VEC_ELT (body, i, bb) { if (bb != entry_bb && bb != exit_bb) { for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi), &name_copies, &decl_copies); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) has_debug_stmt = true; else separate_decls_in_region_stmt (entry, exit, stmt, &name_copies, &decl_copies); } } } /* Now process debug bind stmts. We must not create decls while processing debug stmts, so we defer their processing so as to make sure we will have debug info for as many variables as possible (all of those that were dealt with in the loop above), and discard those for which we know there's nothing we can do. */ if (has_debug_stmt) FOR_EACH_VEC_ELT (body, i, bb) if (bb != entry_bb && bb != exit_bb) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);) { gimple stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) { if (separate_decls_in_region_debug (stmt, &name_copies, &decl_copies)) { gsi_remove (&gsi, true); continue; } } gsi_next (&gsi); } } if (name_copies.elements () == 0 && reduction_list->elements () == 0) { /* It may happen that there is nothing to copy (if there are only loop carried and external variables in the loop). */ *arg_struct = NULL; *new_arg_struct = NULL; } else { /* Create the type for the structure to store the ssa names to. */ type = lang_hooks.types.make_type (RECORD_TYPE); type_name = build_decl (UNKNOWN_LOCATION, TYPE_DECL, create_tmp_var_name (".paral_data"), type); TYPE_NAME (type) = type_name; name_copies.traverse <tree, add_field_for_name> (type); if (reduction_list && reduction_list->elements () > 0) { /* Create the fields for reductions. */ reduction_list->traverse <tree, add_field_for_reduction> (type); } layout_type (type); /* Create the loads and stores. */ *arg_struct = create_tmp_var (type, ".paral_data_store"); nvar = create_tmp_var (build_pointer_type (type), ".paral_data_load"); *new_arg_struct = make_ssa_name (nvar); ld_st_data->store = *arg_struct; ld_st_data->load = *new_arg_struct; ld_st_data->store_bb = bb0; ld_st_data->load_bb = bb1; name_copies .traverse <struct clsn_data *, create_loads_and_stores_for_name> (ld_st_data); /* Load the calculation from memory (after the join of the threads). 
*/ if (reduction_list && reduction_list->elements () > 0) { reduction_list ->traverse <struct clsn_data *, create_stores_for_reduction> (ld_st_data); clsn_data.load = make_ssa_name (nvar); clsn_data.load_bb = exit->dest; clsn_data.store = ld_st_data->store; create_final_loads_for_reduction (reduction_list, &clsn_data); } } } /* Returns true if FN was created to run in parallel. */ bool parallelized_function_p (tree fndecl) { cgraph_node *node = cgraph_node::get (fndecl); gcc_assert (node != NULL); return node->parallelized_function; } /* Creates and returns an empty function that will receive the body of a parallelized loop. */ static tree create_loop_fn (location_t loc) { char buf[100]; char *tname; tree decl, type, name, t; struct function *act_cfun = cfun; static unsigned loopfn_num; loc = LOCATION_LOCUS (loc); snprintf (buf, 100, "%s.$loopfn", current_function_name ()); ASM_FORMAT_PRIVATE_NAME (tname, buf, loopfn_num++); clean_symbol_name (tname); name = get_identifier (tname); type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE); decl = build_decl (loc, FUNCTION_DECL, name, type); TREE_STATIC (decl) = 1; TREE_USED (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 0; TREE_PUBLIC (decl) = 0; DECL_UNINLINABLE (decl) = 1; DECL_EXTERNAL (decl) = 0; DECL_CONTEXT (decl) = NULL_TREE; DECL_INITIAL (decl) = make_node (BLOCK); t = build_decl (loc, RESULT_DECL, NULL_TREE, void_type_node); DECL_ARTIFICIAL (t) = 1; DECL_IGNORED_P (t) = 1; DECL_RESULT (decl) = t; t = build_decl (loc, PARM_DECL, get_identifier (".paral_data_param"), ptr_type_node); DECL_ARTIFICIAL (t) = 1; DECL_ARG_TYPE (t) = ptr_type_node; DECL_CONTEXT (t) = decl; TREE_USED (t) = 1; DECL_ARGUMENTS (decl) = t; allocate_struct_function (decl, false); /* The call to allocate_struct_function clobbers CFUN, so we need to restore it. */ set_cfun (act_cfun); return decl; } /* Moves the exit condition of LOOP to the beginning of its header, and duplicates the part of the last iteration that gets disabled to the exit of the loop. NIT is the number of iterations of the loop (used to initialize the variables in the duplicated part). TODO: the common case is that latch of the loop is empty and immediately follows the loop exit. In this case, it would be better not to copy the body of the loop, but only move the entry of the loop directly before the exit check and increase the number of iterations of the loop by one. This may need some additional preconditioning in case NIT = ~0. REDUCTION_LIST describes the reductions in LOOP. */ static void transform_to_exit_first_loop (struct loop *loop, reduction_info_table_type *reduction_list, tree nit) { basic_block *bbs, *nbbs, ex_bb, orig_header; unsigned n; bool ok; edge exit = single_dom_exit (loop), hpred; tree control, control_name, res, t; gphi *phi, *nphi; gassign *stmt; gcond *cond_stmt, *cond_nit; tree nit_1; split_block_after_labels (loop->header); orig_header = single_succ (loop->header); hpred = single_succ_edge (loop->header); cond_stmt = as_a <gcond *> (last_stmt (exit->src)); control = gimple_cond_lhs (cond_stmt); gcc_assert (gimple_cond_rhs (cond_stmt) == nit); /* Make sure that we have phi nodes on exit for all loop header phis (create_parallel_loop requires that). 
*/ for (gphi_iterator gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi)) { phi = gsi.phi (); res = PHI_RESULT (phi); t = copy_ssa_name (res, phi); SET_PHI_RESULT (phi, t); nphi = create_phi_node (res, orig_header); add_phi_arg (nphi, t, hpred, UNKNOWN_LOCATION); if (res == control) { gimple_cond_set_lhs (cond_stmt, t); update_stmt (cond_stmt); control = t; } } bbs = get_loop_body_in_dom_order (loop); for (n = 0; bbs[n] != exit->src; n++) continue; nbbs = XNEWVEC (basic_block, n); ok = gimple_duplicate_sese_tail (single_succ_edge (loop->header), exit, bbs + 1, n, nbbs); gcc_assert (ok); free (bbs); ex_bb = nbbs[0]; free (nbbs); /* Other than reductions, the only gimple reg that should be copied out of the loop is the control variable. */ exit = single_dom_exit (loop); control_name = NULL_TREE; for (gphi_iterator gsi = gsi_start_phis (ex_bb); !gsi_end_p (gsi); ) { phi = gsi.phi (); res = PHI_RESULT (phi); if (virtual_operand_p (res)) { gsi_next (&gsi); continue; } /* Check if it is a part of reduction. If it is, keep the phi at the reduction's keep_res field. The PHI_RESULT of this phi is the resulting value of the reduction variable when exiting the loop. */ if (reduction_list->elements () > 0) { struct reduction_info *red; tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit); red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val)); if (red) { red->keep_res = phi; gsi_next (&gsi); continue; } } gcc_assert (control_name == NULL_TREE && SSA_NAME_VAR (res) == SSA_NAME_VAR (control)); control_name = res; remove_phi_node (&gsi, false); } gcc_assert (control_name != NULL_TREE); /* Initialize the control variable to number of iterations according to the rhs of the exit condition. */ gimple_stmt_iterator gsi = gsi_after_labels (ex_bb); cond_nit = as_a <gcond *> (last_stmt (exit->src)); nit_1 = gimple_cond_rhs (cond_nit); nit_1 = force_gimple_operand_gsi (&gsi, fold_convert (TREE_TYPE (control_name), nit_1), false, NULL_TREE, false, GSI_SAME_STMT); stmt = gimple_build_assign (control_name, nit_1); gsi_insert_before (&gsi, stmt, GSI_NEW_STMT); } /* Create the parallel constructs for LOOP as described in gen_parallel_loop. LOOP_FN and DATA are the arguments of GIMPLE_OMP_PARALLEL. NEW_DATA is the variable that should be initialized from the argument of LOOP_FN. N_THREADS is the requested number of threads. Returns the basic block containing GIMPLE_OMP_PARALLEL tree. */ static basic_block create_parallel_loop (struct loop *loop, tree loop_fn, tree data, tree new_data, unsigned n_threads, location_t loc) { gimple_stmt_iterator gsi; basic_block bb, paral_bb, for_bb, ex_bb; tree t, param; gomp_parallel *omp_par_stmt; gimple omp_return_stmt1, omp_return_stmt2; gimple phi; gcond *cond_stmt; gomp_for *for_stmt; gomp_continue *omp_cont_stmt; tree cvar, cvar_init, initvar, cvar_next, cvar_base, type; edge exit, nexit, guard, end, e; /* Prepare the GIMPLE_OMP_PARALLEL statement. */ bb = loop_preheader_edge (loop)->src; paral_bb = single_pred (bb); gsi = gsi_last_bb (paral_bb); t = build_omp_clause (loc, OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR (t) = build_int_cst (integer_type_node, n_threads); omp_par_stmt = gimple_build_omp_parallel (NULL, t, loop_fn, data); gimple_set_location (omp_par_stmt, loc); gsi_insert_after (&gsi, omp_par_stmt, GSI_NEW_STMT); /* Initialize NEW_DATA. 
*/ if (data) { gassign *assign_stmt; gsi = gsi_after_labels (bb); param = make_ssa_name (DECL_ARGUMENTS (loop_fn)); assign_stmt = gimple_build_assign (param, build_fold_addr_expr (data)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); assign_stmt = gimple_build_assign (new_data, fold_convert (TREE_TYPE (new_data), param)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_PARALLEL. */ bb = split_loop_exit_edge (single_dom_exit (loop)); gsi = gsi_last_bb (bb); omp_return_stmt1 = gimple_build_omp_return (false); gimple_set_location (omp_return_stmt1, loc); gsi_insert_after (&gsi, omp_return_stmt1, GSI_NEW_STMT); /* Extract data for GIMPLE_OMP_FOR. */ gcc_assert (loop->header == single_dom_exit (loop)->src); cond_stmt = as_a <gcond *> (last_stmt (loop->header)); cvar = gimple_cond_lhs (cond_stmt); cvar_base = SSA_NAME_VAR (cvar); phi = SSA_NAME_DEF_STMT (cvar); cvar_init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop)); initvar = copy_ssa_name (cvar); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, loop_preheader_edge (loop)), initvar); cvar_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop)); gsi = gsi_last_nondebug_bb (loop->latch); gcc_assert (gsi_stmt (gsi) == SSA_NAME_DEF_STMT (cvar_next)); gsi_remove (&gsi, true); /* Prepare cfg. */ for_bb = split_edge (loop_preheader_edge (loop)); ex_bb = split_loop_exit_edge (single_dom_exit (loop)); extract_true_false_edges_from_block (loop->header, &nexit, &exit); gcc_assert (exit == single_dom_exit (loop)); guard = make_edge (for_bb, ex_bb, 0); single_succ_edge (loop->latch)->flags = 0; end = make_edge (loop->latch, ex_bb, EDGE_FALLTHRU); for (gphi_iterator gpi = gsi_start_phis (ex_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { source_location locus; tree def; gphi *phi = gpi.phi (); gphi *stmt; stmt = as_a <gphi *> ( SSA_NAME_DEF_STMT (PHI_ARG_DEF_FROM_EDGE (phi, exit))); def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop)); locus = gimple_phi_arg_location_from_edge (stmt, loop_preheader_edge (loop)); add_phi_arg (phi, def, guard, locus); def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (loop)); locus = gimple_phi_arg_location_from_edge (stmt, loop_latch_edge (loop)); add_phi_arg (phi, def, end, locus); } e = redirect_edge_and_branch (exit, nexit->dest); PENDING_STMT (e) = NULL; /* Emit GIMPLE_OMP_FOR. */ gimple_cond_set_lhs (cond_stmt, cvar_base); type = TREE_TYPE (cvar); t = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE); OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC; for_stmt = gimple_build_omp_for (NULL, GF_OMP_FOR_KIND_FOR, t, 1, NULL); gimple_set_location (for_stmt, loc); gimple_omp_for_set_index (for_stmt, 0, initvar); gimple_omp_for_set_initial (for_stmt, 0, cvar_init); gimple_omp_for_set_final (for_stmt, 0, gimple_cond_rhs (cond_stmt)); gimple_omp_for_set_cond (for_stmt, 0, gimple_cond_code (cond_stmt)); gimple_omp_for_set_incr (for_stmt, 0, build2 (PLUS_EXPR, type, cvar_base, build_int_cst (type, 1))); gsi = gsi_last_bb (for_bb); gsi_insert_after (&gsi, for_stmt, GSI_NEW_STMT); SSA_NAME_DEF_STMT (initvar) = for_stmt; /* Emit GIMPLE_OMP_CONTINUE. */ gsi = gsi_last_bb (loop->latch); omp_cont_stmt = gimple_build_omp_continue (cvar_next, cvar); gimple_set_location (omp_cont_stmt, loc); gsi_insert_after (&gsi, omp_cont_stmt, GSI_NEW_STMT); SSA_NAME_DEF_STMT (cvar_next) = omp_cont_stmt; /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_FOR. 
*/ gsi = gsi_last_bb (ex_bb); omp_return_stmt2 = gimple_build_omp_return (true); gimple_set_location (omp_return_stmt2, loc); gsi_insert_after (&gsi, omp_return_stmt2, GSI_NEW_STMT); /* After the above dom info is hosed. Re-compute it. */ free_dominance_info (CDI_DOMINATORS); calculate_dominance_info (CDI_DOMINATORS); return paral_bb; } /* Generates code to execute the iterations of LOOP in N_THREADS threads in parallel. NITER describes number of iterations of LOOP. REDUCTION_LIST describes the reductions existent in the LOOP. */ static void gen_parallel_loop (struct loop *loop, reduction_info_table_type *reduction_list, unsigned n_threads, struct tree_niter_desc *niter) { tree many_iterations_cond, type, nit; tree arg_struct, new_arg_struct; gimple_seq stmts; edge entry, exit; struct clsn_data clsn_data; unsigned prob; location_t loc; gimple cond_stmt; unsigned int m_p_thread=2; /* From --------------------------------------------------------------------- loop { IV = phi (INIT, IV + STEP) BODY1; if (COND) break; BODY2; } --------------------------------------------------------------------- with # of iterations NITER (possibly with MAY_BE_ZERO assumption), we generate the following code: --------------------------------------------------------------------- if (MAY_BE_ZERO || NITER < MIN_PER_THREAD * N_THREADS) goto original; BODY1; store all local loop-invariant variables used in body of the loop to DATA. GIMPLE_OMP_PARALLEL (OMP_CLAUSE_NUM_THREADS (N_THREADS), LOOPFN, DATA); load the variables from DATA. GIMPLE_OMP_FOR (IV = INIT; COND; IV += STEP) (OMP_CLAUSE_SCHEDULE (static)) BODY2; BODY1; GIMPLE_OMP_CONTINUE; GIMPLE_OMP_RETURN -- GIMPLE_OMP_FOR GIMPLE_OMP_RETURN -- GIMPLE_OMP_PARALLEL goto end; original: loop { IV = phi (INIT, IV + STEP) BODY1; if (COND) break; BODY2; } end: */ /* Create two versions of the loop -- in the old one, we know that the number of iterations is large enough, and we will transform it into the loop that will be split to loop_fn, the new one will be used for the remaining iterations. */ /* We should compute a better number-of-iterations value for outer loops. That is, if we have for (i = 0; i < n; ++i) for (j = 0; j < m; ++j) ... we should compute nit = n * m, not nit = n. Also may_be_zero handling would need to be adjusted. */ type = TREE_TYPE (niter->niter); nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true, NULL_TREE); if (stmts) gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); if (loop->inner) m_p_thread=2; else m_p_thread=MIN_PER_THREAD; many_iterations_cond = fold_build2 (GE_EXPR, boolean_type_node, nit, build_int_cst (type, m_p_thread * n_threads)); many_iterations_cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, invert_truthvalue (unshare_expr (niter->may_be_zero)), many_iterations_cond); many_iterations_cond = force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE); if (stmts) gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); if (!is_gimple_condexpr (many_iterations_cond)) { many_iterations_cond = force_gimple_operand (many_iterations_cond, &stmts, true, NULL_TREE); if (stmts) gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); } initialize_original_copy_tables (); /* We assume that the loop usually iterates a lot. 
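   The branch probability chosen below, 4 * REG_BR_PROB_BASE / 5, encodes
   this assumption as an 80% chance of taking the many-iterations
   (parallelized) version.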
*/ prob = 4 * REG_BR_PROB_BASE / 5; loop_version (loop, many_iterations_cond, NULL, prob, prob, REG_BR_PROB_BASE - prob, true); update_ssa (TODO_update_ssa); free_original_copy_tables (); /* Base all the induction variables in LOOP on a single control one. */ canonicalize_loop_ivs (loop, &nit, true); /* Ensure that the exit condition is the first statement in the loop. */ transform_to_exit_first_loop (loop, reduction_list, nit); /* Generate initializations for reductions. */ if (reduction_list->elements () > 0) reduction_list->traverse <struct loop *, initialize_reductions> (loop); /* Eliminate the references to local variables from the loop. */ gcc_assert (single_exit (loop)); entry = loop_preheader_edge (loop); exit = single_dom_exit (loop); eliminate_local_variables (entry, exit); /* In the old loop, move all variables non-local to the loop to a structure and back, and create separate decls for the variables used in loop. */ separate_decls_in_region (entry, exit, reduction_list, &arg_struct, &new_arg_struct, &clsn_data); /* Create the parallel constructs. */ loc = UNKNOWN_LOCATION; cond_stmt = last_stmt (loop->header); if (cond_stmt) loc = gimple_location (cond_stmt); create_parallel_loop (loop, create_loop_fn (loc), arg_struct, new_arg_struct, n_threads, loc); if (reduction_list->elements () > 0) create_call_for_reduction (loop, reduction_list, &clsn_data); scev_reset (); /* Cancel the loop (it is simpler to do it here rather than to teach the expander to do it). */ cancel_loop_tree (loop); /* Free loop bound estimations that could contain references to removed statements. */ FOR_EACH_LOOP (loop, 0) free_numbers_of_iterations_estimates_loop (loop); } /* Returns true when LOOP contains vector phi nodes. */ static bool loop_has_vector_phi_nodes (struct loop *loop ATTRIBUTE_UNUSED) { unsigned i; basic_block *bbs = get_loop_body_in_dom_order (loop); gphi_iterator gsi; bool res = true; for (i = 0; i < loop->num_nodes; i++) for (gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi)) if (TREE_CODE (TREE_TYPE (PHI_RESULT (gsi.phi ()))) == VECTOR_TYPE) goto end; res = false; end: free (bbs); return res; } /* Create a reduction_info struct, initialize it with REDUC_STMT and PHI, insert it to the REDUCTION_LIST. */ static void build_new_reduction (reduction_info_table_type *reduction_list, gimple reduc_stmt, gphi *phi) { reduction_info **slot; struct reduction_info *new_reduction; gcc_assert (reduc_stmt); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Detected reduction. reduction stmt is: \n"); print_gimple_stmt (dump_file, reduc_stmt, 0, 0); fprintf (dump_file, "\n"); } new_reduction = XCNEW (struct reduction_info); new_reduction->reduc_stmt = reduc_stmt; new_reduction->reduc_phi = phi; new_reduction->reduc_version = SSA_NAME_VERSION (gimple_phi_result (phi)); new_reduction->reduction_code = gimple_assign_rhs_code (reduc_stmt); slot = reduction_list->find_slot (new_reduction, INSERT); *slot = new_reduction; } /* Callback for htab_traverse. Sets gimple_uid of reduc_phi stmts. */ int set_reduc_phi_uids (reduction_info **slot, void *data ATTRIBUTE_UNUSED) { struct reduction_info *const red = *slot; gimple_set_uid (red->reduc_phi, red->reduc_version); return 1; } /* Detect all reductions in the LOOP, insert them into REDUCTION_LIST. 
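   For example (a hedged sketch), in

     for (i = 0; i < n; i++)
       sum = sum + a[i];

   the header phi for sum is not a simple induction variable, so
   vect_force_simple_reduction is consulted; it recognizes the PLUS_EXPR
   reduction, which build_new_reduction then records.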
*/ static void gather_scalar_reductions (loop_p loop, reduction_info_table_type *reduction_list) { gphi_iterator gsi; loop_vec_info simple_loop_info; simple_loop_info = vect_analyze_loop_form (loop); for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi)) { gphi *phi = gsi.phi (); affine_iv iv; tree res = PHI_RESULT (phi); bool double_reduc; if (virtual_operand_p (res)) continue; if (!simple_iv (loop, loop, res, &iv, true) && simple_loop_info) { gimple reduc_stmt = vect_force_simple_reduction (simple_loop_info, phi, true, &double_reduc); if (reduc_stmt && !double_reduc) build_new_reduction (reduction_list, reduc_stmt, phi); } } destroy_loop_vec_info (simple_loop_info, true); /* As gimple_uid is used by the vectorizer in between vect_analyze_loop_form and destroy_loop_vec_info, we can set gimple_uid of reduc_phi stmts only now. */ reduction_list->traverse <void *, set_reduc_phi_uids> (NULL); } /* Try to initialize NITER for code generation part. */ static bool try_get_loop_niter (loop_p loop, struct tree_niter_desc *niter) { edge exit = single_dom_exit (loop); gcc_assert (exit); /* We need to know # of iterations, and there should be no uses of values defined inside loop outside of it, unless the values are invariants of the loop. */ if (!number_of_iterations_exit (loop, exit, niter, false)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " FAILED: number of iterations not known\n"); return false; } return true; } /* Try to initialize REDUCTION_LIST for code generation part. REDUCTION_LIST describes the reductions. */ static bool try_create_reduction_list (loop_p loop, reduction_info_table_type *reduction_list) { edge exit = single_dom_exit (loop); gphi_iterator gsi; gcc_assert (exit); gather_scalar_reductions (loop, reduction_list); for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi)) { gphi *phi = gsi.phi (); struct reduction_info *red; imm_use_iterator imm_iter; use_operand_p use_p; gimple reduc_phi; tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit); if (!virtual_operand_p (val)) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "phi is "); print_gimple_stmt (dump_file, phi, 0, 0); fprintf (dump_file, "arg of phi to exit: value "); print_generic_expr (dump_file, val, 0); fprintf (dump_file, " used outside loop\n"); fprintf (dump_file, " checking if it a part of reduction pattern: \n"); } if (reduction_list->elements () == 0) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " FAILED: it is not a part of reduction.\n"); return false; } reduc_phi = NULL; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val) { if (!gimple_debug_bind_p (USE_STMT (use_p)) && flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))) { reduc_phi = USE_STMT (use_p); break; } } red = reduction_phi (reduction_list, reduc_phi); if (red == NULL) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " FAILED: it is not a part of reduction.\n"); return false; } if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "reduction phi is "); print_gimple_stmt (dump_file, red->reduc_phi, 0, 0); fprintf (dump_file, "reduction stmt is "); print_gimple_stmt (dump_file, red->reduc_stmt, 0, 0); } } } /* The iterations of the loop may communicate only through bivs whose iteration space can be distributed efficiently. 
*/ for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi)) { gphi *phi = gsi.phi (); tree def = PHI_RESULT (phi); affine_iv iv; if (!virtual_operand_p (def) && !simple_iv (loop, loop, def, &iv, true)) { struct reduction_info *red; red = reduction_phi (reduction_list, phi); if (red == NULL) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " FAILED: scalar dependency between iterations\n"); return false; } } } return true; } /* Detect parallel loops and generate parallel code using libgomp primitives. Returns true if some loop was parallelized, false otherwise. */ static bool parallelize_loops (void) { unsigned n_threads = flag_tree_parallelize_loops; bool changed = false; struct loop *loop; struct tree_niter_desc niter_desc; struct obstack parloop_obstack; HOST_WIDE_INT estimated; source_location loop_loc; /* Do not parallelize loops in the functions created by parallelization. */ if (parallelized_function_p (cfun->decl)) return false; if (cfun->has_nonlocal_label) return false; gcc_obstack_init (&parloop_obstack); reduction_info_table_type reduction_list (10); init_stmt_vec_info_vec (); FOR_EACH_LOOP (loop, 0) { reduction_list.empty (); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Trying loop %d as candidate\n",loop->num); if (loop->inner) fprintf (dump_file, "loop %d is not innermost\n",loop->num); else fprintf (dump_file, "loop %d is innermost\n",loop->num); } /* If we use autopar in graphite pass, we use its marked dependency checking results. */ if (flag_loop_parallelize_all && !loop->can_be_parallel) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "loop is not parallel according to graphite\n"); continue; } if (!single_dom_exit (loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "loop is !single_dom_exit\n"); continue; } if (/* And of course, the loop must be parallelizable. */ !can_duplicate_loop_p (loop) || loop_has_blocks_with_irreducible_flag (loop) || (loop_preheader_edge (loop)->src->flags & BB_IRREDUCIBLE_LOOP) /* FIXME: the check for vector phi nodes could be removed. */ || loop_has_vector_phi_nodes (loop)) continue; estimated = estimated_stmt_executions_int (loop); if (estimated == -1) estimated = max_stmt_executions_int (loop); /* FIXME: Bypass this check as graphite doesn't update the count and frequency correctly now. */ if (!flag_loop_parallelize_all && ((estimated != -1 && estimated <= (HOST_WIDE_INT) n_threads * MIN_PER_THREAD) /* Do not bother with loops in cold areas. */ || optimize_loop_nest_for_size_p (loop))) continue; if (!try_get_loop_niter (loop, &niter_desc)) continue; if (!try_create_reduction_list (loop, &reduction_list)) continue; if (!flag_loop_parallelize_all && !loop_parallel_p (loop, &parloop_obstack)) continue; changed = true; if (dump_file && (dump_flags & TDF_DETAILS)) { if (loop->inner) fprintf (dump_file, "parallelizing outer loop %d\n",loop->header->index); else fprintf (dump_file, "parallelizing inner loop %d\n",loop->header->index); loop_loc = find_loop_location (loop); if (loop_loc != UNKNOWN_LOCATION) fprintf (dump_file, "\nloop at %s:%d: ", LOCATION_FILE (loop_loc), LOCATION_LINE (loop_loc)); } gen_parallel_loop (loop, &reduction_list, n_threads, &niter_desc); } free_stmt_vec_info_vec (); obstack_free (&parloop_obstack, NULL); /* Parallelization will cause new function calls to be inserted through which local variables will escape. Reset the points-to solution for ESCAPED. 
*/ if (changed) pt_solution_reset (&cfun->gimple_df->escaped); return changed; } /* Parallelization. */ namespace { const pass_data pass_data_parallelize_loops = { GIMPLE_PASS, /* type */ "parloops", /* name */ OPTGROUP_LOOP, /* optinfo_flags */ TV_TREE_PARALLELIZE_LOOPS, /* tv_id */ ( PROP_cfg | PROP_ssa ), /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0, /* todo_flags_finish */ }; class pass_parallelize_loops : public gimple_opt_pass { public: pass_parallelize_loops (gcc::context *ctxt) : gimple_opt_pass (pass_data_parallelize_loops, ctxt) {} /* opt_pass methods: */ virtual bool gate (function *) { return flag_tree_parallelize_loops > 1; } virtual unsigned int execute (function *); }; // class pass_parallelize_loops unsigned pass_parallelize_loops::execute (function *fun) { if (number_of_loops (fun) <= 1) return 0; if (parallelize_loops ()) { fun->curr_properties &= ~(PROP_gimple_eomp); return TODO_update_ssa; } return 0; } } // anon namespace gimple_opt_pass * make_pass_parallelize_loops (gcc::context *ctxt) { return new pass_parallelize_loops (ctxt); }
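Illustrative aside (not part of the GCC sources): the parloops pass above accepts a countable, single-exit loop, optionally with a scalar reduction that gather_scalar_reductions can recognize. A minimal sketch of such an input, assuming compilation with -ftree-parallelize-loops=4; the file name and build line are hypothetical:

/* parloops_example.c -- hypothetical input for the pass above.
   Assumed build line: gcc -O2 -ftree-parallelize-loops=4 -c parloops_example.c */
double sum_array (const double *a, long n)
{
  double s = 0.0;
  /* Single-exit countable loop with a plus-reduction on s: the shape that
     try_get_loop_niter and try_create_reduction_list check for. */
  for (long i = 0; i < n; i++)
    s += a[i];
  return s;
}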
schedules.c
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

void mat_mult(int n, double *A, double *B, double *C);
void init(int n, int size, double **A, double **B, double **C, int **ptr);
long usecs ();

/* Fortran BLAS routine called below; declared here so the call is prototyped. */
extern void dgemm_(const char *transa, const char *transb,
                   const int *m, const int *n, const int *k,
                   const double *alpha, const double *A, const int *lda,
                   const double *B, const int *ldb,
                   const double *beta, double *C, const int *ldc);

int main(){
  int size, n, *ptr;
  double *A, *B, *C;
  long t_start, t_end;

  size = 1500;
  /* The first argument of init is unused; pass size rather than an
     uninitialized variable. */
  init(size, size, &A, &B, &C, &ptr);

  t_start = usecs();
  /* Parallelize this loop */
  /* CC: the dynamic schedule is important for performance because the cost
     of a matrix multiplication increases with the cube of n */
#pragma omp parallel for schedule(dynamic,1)
  for (n=size; n>=100; n-=100){
    /* Multiply two matrices of size n: C = A*B */
    mat_mult(n, A+ptr[n], B+ptr[n], C+ptr[n]);
  }
  t_end = usecs();

  printf("time : %8.2f msec.\n",((double)t_end-t_start)/1000.0);
  return 0;
}

/* Matrix multiplication routine */
void mat_mult(int n, double *A, double *B, double *C){
  double alpha, beta;
  char NoTran='N';

  alpha = 3.1;
  beta = 0.7;
  dgemm_(&NoTran, &NoTran, &n, &n, &n, &alpha, A, &n, B, &n, &beta, C, &n);
  return;
}

/* Initialization routine */
void init(int n, int size, double **A, double **B, double **C, int **ptr){
  int i, totsize;

  totsize=0;
  *ptr = (int*)malloc((size+1)*sizeof(int));
  for (i=100; i<=size; i+=100){
    (*ptr)[i] = totsize;
    totsize+=i*i;
  }

  *A = (double*)malloc(totsize*sizeof(double));
  *B = (double*)malloc(totsize*sizeof(double));
  *C = (double*)malloc(totsize*sizeof(double));
  for (i=0; i<totsize; i++){
    (*A)[i] = (double)rand()/RAND_MAX;
    (*B)[i] = (double)rand()/RAND_MAX;
    (*C)[i] = (double)rand()/RAND_MAX;
  }
  return;
}

/* Timer */
long usecs (){
  struct timeval t;

  gettimeofday(&t,NULL);
  return t.tv_sec*1000000+t.tv_usec;
}
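A worked note on the schedule choice above (my addition, assuming 4 threads and cost proportional to n^3): the loop runs 15 multiplications with n = 1500, 1400, ..., 100, so the total work is sum over k=1..15 of (100k)^3 = 10^6 * (15*16/2)^2 = 1.44e10 flops, and the ideal per-thread share is 3.6e9. With schedule(static,1), thread 0 is assigned n = 1500, 1100, 700, 300, which is (3375 + 1331 + 343 + 27) * 10^6 = 5.08e9 flops, roughly 1.41x the ideal share, so the other threads sit idle at the end. With schedule(dynamic,1), each thread grabs the next matrix as soon as it finishes, and since the single largest task (3.375e9 for n=1500) is below the ideal share, the load balances out to roughly 3.6e9 per thread.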
wave3d_perf_b.c
#ifndef TAPENADE #include <math.h> #endif #define Max(x,y) fmax(x,y) #define Min(x,y) fmin(x,y) #define Heaviside(x) ((x>=0)?1.0:0.0) #define u(x,xx,xxx) u[x][xx][xxx] #define u_b(x,xx,xxx) u_b[x][xx][xxx] #define c(x,xx,xxx) c[x][xx][xxx] #define u_1(x,xx,xxx) u_1[x][xx][xxx] #define u_1_b(x,xx,xxx) u_1_b[x][xx][xxx] #define u_2(x,xx,xxx) u_2[x][xx][xxx] #define u_2_b(x,xx,xxx) u_2_b[x][xx][xxx] void wave3d_perf_b(double*** u, double*** u_b, double*** c, double*** u_1, double*** u_1_b, double*** u_2, double*** u_2_b, double D, int n) { int i; int j; int k; i=0; //#pragma omp parallel for private(k,j) for ( j=1; j<=n - 2; j++ ) { for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); } } i=n - 2; j=0; //#pragma omp parallel for private(k) for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); } i=n - 2; j=n - 2; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); i=n - 2; j=n - 2; k=n - 2; u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=n - 2; j=n - 2; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); i=n - 2; j=n - 2; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=n - 2; j=n - 2; //#pragma omp parallel for private(k) for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=n - 2; j=1; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); i=n - 2; j=1; k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=n - 2; j=1; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); i=n - 2; j=1; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=n - 2; j=1; //#pragma omp parallel for private(k) for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=n - 2; j=n - 1; //#pragma omp parallel for private(k) for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } i=n - 2; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } i=n - 2; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); 
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=n - 2; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } i=n - 2; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=n - 2; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } i=1; j=0; //#pragma omp parallel for private(k) for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); } i=1; j=n - 2; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); i=1; j=n - 2; k=n - 2; u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=1; j=n - 2; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); i=1; j=n - 2; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=1; j=n - 2; //#pragma omp parallel for private(k) for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=1; j=1; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); i=1; j=1; k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=1; j=1; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); i=1; j=1; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=1; j=1; //#pragma omp parallel for private(k) for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=1; j=n - 1; //#pragma omp parallel for private(k) for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } i=1; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } i=1; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=n - 2; 
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=1; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } i=1; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=1; //#pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } i=n - 1; //#pragma omp parallel for private(k,j) for ( j=1; j<=n - 2; j++ ) { for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=0; for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; k=n - 2; u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, 
k - 1); } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 1; for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } //#pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } } }
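For orientation (my reconstruction, not part of the generated file): wave3d_perf_b is reverse-mode adjoint code, apparently produced by Tapenade, and the many near-identical blocks are the peeled boundary cases of one interior stencil. Reading the adjoint coefficients back -- d u/d u_1 = 2 - 6*D*c at the center, D*c on each of the six neighbors, and d u/d u_2 = -1 -- the forward primal it differentiates is presumably a leapfrog wave-equation update over the interior, along these lines:

/* Hypothetical forward sweep inferred from the adjoint above; boundary
   handling is guessed to be "interior cells only", matching the peeling. */
void wave3d_perf(double ***u, double ***c, double ***u_1, double ***u_2,
                 double D, int n)
{
  for (int i = 1; i <= n - 2; i++)
    for (int j = 1; j <= n - 2; j++)
      for (int k = 1; k <= n - 2; k++)
        u[i][j][k] = 2.0 * u_1[i][j][k] - u_2[i][j][k]
                   + D * c[i][j][k] * (u_1[i+1][j][k] + u_1[i-1][j][k]
                                     + u_1[i][j+1][k] + u_1[i][j-1][k]
                                     + u_1[i][j][k+1] + u_1[i][j][k-1]
                                     - 6.0 * u_1[i][j][k]);
}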
GB_binop__first_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_int64) // A.*B function (eWiseMult): GB (_AemultB_08__first_int64) // A.*B function (eWiseMult): GB (_AemultB_02__first_int64) // A.*B function (eWiseMult): GB (_AemultB_04__first_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int64) // A*D function (colscale): GB (_AxD__first_int64) // D*A function (rowscale): GB (_DxB__first_int64) // C+=B function (dense accum): GB (_Cdense_accumB__first_int64) // C+=b function (dense accum): GB (_Cdense_accumb__first_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = aij #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_INT64 || GxB_NO_FIRST_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__first_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" 
GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__first_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__first_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
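A usage sketch (my addition, not generated code): kernels like the ones above are what SuiteSparse:GraphBLAS dispatches to when GrB_FIRST_INT64 is applied element-wise. Since FIRST returns its first argument, eWiseAdd (a set union) keeps A's value wherever A has an entry and passes B's entries through elsewhere. The matrix sizes and values below are arbitrary:

#include <GraphBLAS.h>

int demo_first_int64 (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, B, C ;
    GrB_Matrix_new (&A, GrB_INT64, 4, 4) ;
    GrB_Matrix_new (&B, GrB_INT64, 4, 4) ;
    GrB_Matrix_new (&C, GrB_INT64, 4, 4) ;
    GrB_Matrix_setElement_INT64 (A, 10, 0, 0) ;   // A(0,0) = 10
    GrB_Matrix_setElement_INT64 (B, 99, 0, 0) ;   // B(0,0) = 99
    GrB_Matrix_setElement_INT64 (B, 7, 1, 1) ;    // B(1,1) = 7
    // set union with FIRST: C(0,0) = A(0,0) = 10 ; C(1,1) = B(1,1) = 7
    GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_FIRST_INT64, A, B, NULL) ;
    GrB_Matrix_free (&A) ; GrB_Matrix_free (&B) ; GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return (0) ;
}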
main.c
/* SOR program */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define Max(a, b) ((a) > (b) ? (a) : (b))
#define N 10
#define ITMAX 20

int main(int an, char **as) {
  int i, j, it;
  float MAXEPS = 0.5E-5f;
  float w = 0.5f; /* relaxation factor */
  float A[N][N];

  /* Initialize A to a diagonally dominant matrix. */
#pragma omp parallel default(shared)
  {
#pragma omp for private(j)
    for (i = 0; i < N; i++)
      for (j = 0; j < N; j++)
        if (i == j)
          A[i][j] = N + 2;
        else
          A[i][j] = -1.0f;
  }

  for (it = 1; it <= ITMAX; it++) {
    float eps = 0.f;
#pragma omp parallel default(shared)
    {
      /* Doacross loop nest: ordered(2) declares cross-iteration dependences
         on both loop levels; each (i,j) update must wait for (i-1,j) and
         (i,j-1), the wavefront pattern of successive over-relaxation. */
#pragma omp for reduction(max : eps) collapse(2) ordered(2) schedule(static, 1)
      for (i = 1; i < N - 1; i++)
        for (j = 1; j < N - 1; j++) {
#pragma omp ordered depend(sink : i - 1, j) depend(sink : i, j - 1)
          float s;
          s = A[i][j];
          A[i][j] = (w / 4) * (A[i - 1][j] + A[i + 1][j] + A[i][j - 1] + A[i][j + 1]) +
                    (1 - w) * A[i][j];
          eps = Max(fabs(s - A[i][j]), eps);
          /* Signal that iteration (i,j) is complete. */
#pragma omp ordered depend(source)
        }
    }
    printf("it=%4i eps=%e\n", it, eps);
    if (eps < MAXEPS)
      break;
  }
  return 0;
}
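A reading note (my gloss, not text from the benchmark): in the loop nest above, depend(sink : i-1, j) and depend(sink : i, j-1) make iteration (i,j) block until both upwind neighbors have executed their depend(source), so the collapsed iteration space drains as an anti-diagonal wavefront while max-reducing eps. A stripped-down skeleton of the same doacross idiom, with work() standing in for the SOR update:

/* Minimal doacross wavefront skeleton (illustrative). */
void wavefront(int n, void (*work)(int, int)) {
#pragma omp parallel for collapse(2) ordered(2)
  for (int i = 1; i < n; i++)
    for (int j = 1; j < n; j++) {
#pragma omp ordered depend(sink : i - 1, j) depend(sink : i, j - 1)
      work(i, j); /* may safely read the results of (i-1,j) and (i,j-1) */
#pragma omp ordered depend(source)
    }
}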
pr79428-6.c
/* PR c/79428 */ /* { dg-options "-fopenmp" } */ #pragma omp target /* { dg-error "expected declaration specifiers before end of line" } */
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. 
struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. 
ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; /// All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. 
Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
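// Editor's note: a hedged sketch of driving the DelayedDiagnostics pool
// machinery above. Constructing a pool chained to the current pool follows
// the usual DelayedDiagnosticPool idiom and is an assumption here.
// \code
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   Sema::ParsingDeclState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse one declaration; access/deprecation diagnostics collect in
//   // Pool instead of being emitted immediately ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);
//   // The caller then decides whether to emit or drop the pooled diagnostics.
// \endcode
// The ContextRAII class below brackets pushUndelayed()/popUndelayed() in the
// same fashion.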
class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in a /// \#pragma weak before being declared. Rare; such an identifier may alias /// another identifier, declared or undeclared. llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in a /// \#pragma redefine_extname before being declared. Used in Solaris system /// headers to define functions that occur in multiple standards to call the /// version in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to look up file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside.
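// Editor's note: a short usage sketch for SynthesizedFunctionScope above; the
// call site (defining an implicit special member) is hypothetical.
// \code
//   void defineImplicitMember(Sema &S, CXXMethodDecl *MD,
//                             SourceLocation UseLoc) {
//     Sema::SynthesizedFunctionScope Scope(S, MD);
//     Scope.addContextNote(UseLoc); // attach a "while defining..." note
//     // ... build and attach the body; the pushed function scope and
//     // evaluation context unwind in ~SynthesizedFunctionScope ...
//   }
// \endcode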
NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). 
ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts.
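// Editor's note: a sketch of how the record above is typically consulted.
// ExprEvalContexts (declared just below) always holds at least one entry, so
// the innermost context can be queried like this (call site hypothetical):
// \code
//   Sema::ExpressionEvaluationContextRecord &Rec = S.ExprEvalContexts.back();
//   if (Rec.isUnevaluated())
//     return; // no code will be generated; skip odr-use bookkeeping
//   if (Rec.isConstantEvaluated())
//     ; // require a constant expression, e.g. for a case label
// \endcode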
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". 
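// Editor's note: a hedged sketch of consuming a SpecialMemberOverloadResult
// from the cache above; the lookup helper is hypothetical, since the lookup
// entry point is not part of this excerpt.
// \code
//   Sema::SpecialMemberOverloadResult SMOR =
//       lookupSpecialMember(S, RD); // hypothetical helper
//   switch (SMOR.getKind()) {
//   case Sema::SpecialMemberOverloadResult::Success:
//     use(SMOR.getMethod()); break;          // viable member found
//   case Sema::SpecialMemberOverloadResult::Ambiguous:
//     /* diagnose the ambiguity */ break;
//   case Sema::SpecialMemberOverloadResult::NoMemberOrDeleted:
//     /* no member, or it is deleted */ break;
//   }
// \endcode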
bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves the list of suspicious delete-expressions that will be checked /// at the end of the translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything.
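// Editor's note: FPContractStateRAII above is a plain save/restore guard; a
// minimal sketch of its bracketing, as used from within Sema's own statement
// handling (access control ignored for illustration):
// \code
//   {
//     Sema::FPContractStateRAII SaveFP(S);
//     // ... process a compound statement that may contain
//     // '#pragma STDC FP_CONTRACT' ...
//   } // S.FPFeatures is restored here
// \endcode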
void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// /// \param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, making the user-defined ~SemaDiagnosticBuilder a safe // no-op in that case. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type.
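// Editor's note: Diag() above is the canonical way to emit a Sema diagnostic;
// arguments are streamed into the returned builder, and the diagnostic fires
// when the temporary is destroyed. The diagnostic ID and operands here are
// illustrative only:
// \code
//   S.Diag(OpLoc, diag::err_typecheck_invalid_operands)
//       << LHS->getType() << RHS->getType()
//       << LHS->getSourceRange() << RHS->getSourceRange();
// \endcode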
std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool isCurCompoundStmtAStmtExpr() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. 
/// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
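// Editor's note: a hedged sketch of calling BuildFunctionType above to
// rebuild a function type from an existing prototype; the surrounding names
// are illustrative, only the signature and contract come from this header.
// \code
//   const FunctionProtoType *Proto = /* existing prototype */ nullptr;
//   SmallVector<QualType, 8> ParamTypes(Proto->param_type_begin(),
//                                       Proto->param_type_end());
//   QualType Fn = S.BuildFunctionType(RetTy, ParamTypes, Loc, Entity,
//                                     Proto->getExtProtoInfo());
//   if (Fn.isNull())
//     return; // errors were already diagnosed, per the contract above
// \endcode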
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, so at the point a dereference is seen we may not yet /// know whether it is a real access. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking whether their /// addresses are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { clang::Module *Module = nullptr; bool ModuleInterface = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
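// Editor's note: the variadic RequireCompleteType overload above packs its
// trailing arguments into a BoundTypeDiagnoser, so callers can attach
// diagnostic arguments directly (the diagnostic ID is illustrative):
// \code
//   if (S.RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return true; // true: T was incomplete and has been diagnosed
// \endcode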
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. 
/// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. 
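// Editor's note: a sketch of a parser-side caller dispatching on the
// NameClassification returned by ClassifyName above (the call site and the
// consume* helpers are hypothetical):
// \code
//   Sema::NameClassification C =
//       S.ClassifyName(CurScope, SS, Name, NameLoc, NextTok,
//                      /*IsAddressOfOperand=*/false);
//   switch (C.getKind()) {
//   case Sema::NC_Type:       consumeType(C.getType()); break;
//   case Sema::NC_Expression: consumeExpr(C.getExpression()); break;
//   case Sema::NC_Error:      /* recover */ break;
//   default:                  /* keywords, templates, ... */ break;
//   }
// \endcode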
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *&Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl 
*dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. 
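// Editor's note: canSkipFunctionBody/ActOnSkippedFunctionBody above form a
// parser-side fast path; a hedged sketch of the protocol (the skip and parse
// helpers are hypothetical):
// \code
//   if (S.canSkipFunctionBody(D) && skipTokensOfBody())
//     return S.ActOnSkippedFunctionBody(D);
//   Stmt *Body = parseFunctionBody();
//   S.ActOnFinishFunctionBody(D, Body);
// \endcode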
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' Partition, ///< 'module partition X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path); /// The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note: the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions.
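// Editor's note: a hedged sketch tying the visibility queries from earlier
// in this header to diagnoseMissingImport above (call site hypothetical):
// \code
//   NamedDecl *Suggested = nullptr;
//   if (!S.hasVisibleDefinition(ND, &Suggested) && Suggested)
//     S.diagnoseMissingImport(UseLoc, Suggested,
//                             Sema::MissingImportKind::Definition,
//                             /*Recover=*/true);
// \endcode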
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". 
TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform an ODR-like check for C/ObjC when merging tag types from modules. /// Unlike C++, we actually parse the body and reject or error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped.
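///
/// A minimal sketch of the expected pairing (illustrative only; assumes a
/// Sema &S and the definition's DeclContext *DC):
/// \code
///   S.PushDeclContext(S.getCurScope(), DC);
///   // ... process the declarations owned by DC ...
///   S.PopDeclContext();
/// \endcode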
void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Attribute merging methods. Return true if a new attribute was added. 
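///
/// A typical call site merges the attributes of a prior declaration into a
/// redeclaration, e.g. (illustrative only; New and Old are the redeclared
/// and previous declarations):
/// \code
///   S.mergeDeclAttributes(New, Old, Sema::AMK_Redeclaration);
/// \endcode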
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. 
Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); /// Checks availability of the function depending on the current /// function context. Inside an unavailable function, unavailability is ignored. /// /// \returns true if \p FD is unavailable and the current context is inside /// an available function, false otherwise. bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not.
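///
/// For example (illustrative), given
/// \code
///   const int &r = 42;
/// \endcode
/// the temporary materialized from '42' would dangle without lifetime
/// extension; this check either extends its lifetime to match 'r' or
/// diagnoses the initializer.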
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf ///< Condition in a constexpr if statement. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
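///
/// A minimal sketch of a call site (illustrative only; Diagnoser is assumed
/// to be an instance of a concrete ICEConvertDiagnoser subclass):
/// \code
///   ExprResult Converted =
///       S.PerformContextualImplicitConversion(Loc, CondExpr, Diagnoser);
///   if (Converted.isInvalid())
///     return StmtError();
/// \endcode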
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate. void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all templates and non-templates identified by // the expression Expr. void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
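///
/// A minimal usage sketch (illustrative only; FD and Loc are the function
/// and the location of the address-of expression):
/// \code
///   if (!S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, Loc))
///     return ExprError();
/// \endcode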
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria are specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source.
bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non-error-recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
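// A sketch of the intended use (illustrative only): attributes recorded
// while a declaration was being parsed can be applied once it is complete,
// e.g.
//   S.ProcessDeclAttributeDelayed(D, SavedAttrs);
// where SavedAttrs is a hypothetical ParsedAttributesView stashed by the
// parser.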
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter /// and its property has a backing ivar, returns this ivar; otherwise, returns /// NULL. It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a /// category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and /// the parameter checkTheOther is set, it then checks the other kind. If no /// such method or only one method is found, the function returns false; /// otherwise, it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the /// global pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures.
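///
/// For example (illustrative only; Sel and R are the selector and source
/// range of a message sent to a receiver of type 'id'):
/// \code
///   ObjCMethodDecl *M = S.LookupInstanceMethodInGlobalPool(Sel, R);
/// \endcode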
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
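///
/// A minimal sketch (illustrative only):
/// \code
///   Sema::FunctionScopeRAII FuncScope(S);
///   // ... analyze the function body; early returns pop the scope ...
///   FuncScope.disable(); // on success, leave the scope for other code to pop
/// \endcode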
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult 
ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
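// [Editorial illustration -- not part of the original header. A minimal
// sketch, with hypothetical names, of the pattern the diagnostic below
// flags; kept out of the build with #if 0.]
#if 0
int *p = 0;       // literal 0 implicitly converted to a null pointer
int *q = nullptr; // the spelling the fix-it steers the user toward
#endif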
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.

bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                       const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                       bool ObjCPropertyAccess = false,
                       bool AvoidPartialAvailabilityChecks = false,
                       ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                      ObjCMethodDecl *Getter,
                                      SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                           ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext,
    Decl *LambdaContextDecl = nullptr,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
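// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] Why odr-use marking is delayed for some
// constant variables (see MarkVariableReferenced below):
#if 0
struct S { static const int x = 1; };
int f() { return S::x; }  // value read only: not an odr-use, no
                          // out-of-line definition of S::x required
const int &r = S::x;      // binding a reference odr-uses S::x, so a
                          // definition is required
#endif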
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);

void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType, QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);

/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());

/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
                                      bool SkipLocalVariables = false);

/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                          bool ForceComplain = false,
                          bool (*IsPlausibleResult)(QualType) = nullptr);

/// Figure out if an expression could be turned into a call.
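// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] The recovery tryExprAsCall (below) supports:
// a function name used where its zero-argument call was intended.
#if 0
int answer();
int n = answer;  // error; recovery suggests inserting '()' to form answer()
#endif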
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> 
Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
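// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] The '.' -> '->' recovery the struct below
// exists to support:
#if 0
struct Point { int x; };
int get(Point *p) { return p.x; }  // error; fix-it rewrites to p->x and
                                   // the member access is retried
#endif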
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
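// [Editorial illustration -- not part of the original header; assumes
// AltiVec mode (-maltivec), where a parenthesized list after a vector cast
// forms a vector literal rather than a comma expression. Fenced with #if 0.]
#if 0
vector int v = (vector int)(1, 2, 3, 4); // AltiVec literal; OpenCL spells the
                                         // same idea as (int4)(1, 2, 3, 4)
#endif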
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                ParsedType Ty,
                                SourceLocation RParenLoc,
                                Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc,
                         MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation Loc,
                                      bool GNUSyntax,
                                      ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                      tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                      BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                TypeSourceInfo *TInfo,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                SourceLocation BuiltinLoc,
                                SourceLocation TypeLoc,
                                ParsedType ParsedArgTy,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                           Expr *CondExpr, Expr *LHSExpr,
                           Expr *RHSExpr, SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                      SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                          TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);

/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
  /// The symbol exists.
  IER_Exists,

  /// The symbol does not exist.
  IER_DoesNotExist,

  /// The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// An error occurred.
  IER_Error
};

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                             const DeclarationNameInfo &TargetNameInfo);

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
                             bool IsIfExists, CXXScopeSpec &SS,
                             UnqualifiedId &Name);

StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      NestedNameSpecifierLoc QualifierLoc,
                                      DeclarationNameInfo NameInfo,
                                      Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      CXXScopeSpec &SS, UnqualifiedId &Name,
                                      Stmt *Nested);

//===------------------------- "Block" Extension ------------------------===//

/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                         Scope *CurScope);

/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                              Scope *CurScope);

//===---------------------------- Clang Extensions ----------------------===//

/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing whether we've fully checked the various comparison
// category types stored in ASTContext. The bit-index corresponds to the
// integer value of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                       CXXScopeSpec &SS,
                                       ParsedType TemplateTypeTy,
                                       IdentifierInfo *MemberOrBase);

public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
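// [Editorial illustration -- not part of the original header; fenced with
// #if 0.] What isStdInitializerList and isInitListConstructor (below) are
// probing for:
#if 0
#include <initializer_list>
#include <vector>
std::initializer_list<int> il = {1, 2, 3}; // Ty is std::initializer_list<int>
std::vector<int> v{4, 5, 6};               // selects vector's
                                           // initializer-list constructor
#endif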
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType, NamedDecl *FoundDecl,
                                 CXXConstructorDecl *Constructor,
                                 MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
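// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] An inherited constructor of the kind the
// overload below resolves:
#if 0
struct Base { Base(int); };
struct Derived : Base { using Base::Base; }; // inherits Base(int)
Derived d(42);                               // constructs through the
                                             // inherited constructor
#endif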
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
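// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] How a computed exception specification falls
// out of CalledDecl above: one potentially-throwing subobject operation
// makes the implicit member noexcept(false).
#if 0
struct Loud  { ~Loud() noexcept(false); };
struct Quiet { ~Quiet(); };        // implicitly noexcept
struct Owner { Loud a; Quiet b; }; // implicit ~Owner() is noexcept(false)
#endif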
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
  FunctionProtoType::ExceptionSpecInfo ESI;
  ESI.Type = getExceptionSpecType();
  if (ESI.Type == EST_Dynamic) {
    ESI.Exceptions = Exceptions;
  } else if (ESI.Type == EST_None) {
    /// C++11 [except.spec]p14:
    ///   The exception-specification is noexcept(false) if the set of
    ///   potential exceptions of the special member function contains "any"
    ESI.Type = EST_NoexceptFalse;
    ESI.NoexceptExpr =
        Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
  }
  return ESI;
}
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                   CXXConstructorDecl *CD);

/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
                             ExceptionSpecificationType &EST);

/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
                                 ExceptionSpecificationType EST,
                                 ArrayRef<ParsedType> DynamicExceptions,
                                 ArrayRef<SourceRange> DynamicExceptionRanges,
                                 Expr *NoexceptExpr,
                                 SmallVectorImpl<QualType> &Exceptions,
                                 FunctionProtoType::ExceptionSpecInfo &ESI);

/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
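// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] Why the exception-specification is attached
// late: it is parsed in a complete-class context, so it may refer to
// members declared after the function itself.
#if 0
struct S {
  void f() noexcept(Nothrow); // Nothrow is not yet visible here
  static constexpr bool Nothrow = true;
};
#endif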
void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. 
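// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] What triggers the definition below: the first
// use of the implicitly-declared special member.
#if 0
struct Pair { int a, b; };
void copy(Pair &x, const Pair &y) {
  x = y; // Pair's implicit operator= is implicitly defined at this point
}
#endif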
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr,
                             SourceLocation Loc,
                             SmallVectorImpl<Expr*> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);

ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                              Scope *S, CXXScopeSpec &SS,
                              bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
                             IdentifierInfo &II, SourceLocation NameLoc,
                             Scope *S, CXXScopeSpec &SS,
                             ParsedType ObjectType,
                             bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                        ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);

/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             SourceLocation LAngleBracketLoc,
                             Declarator &D,
                             SourceLocation RAngleBracketLoc,
                             SourceLocation LParenLoc,
                             Expr *E,
                             SourceLocation RParenLoc);

ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                             TypeSourceInfo *Ty, Expr *E,
                             SourceRange AngleBrackets, SourceRange Parens);

ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXTypeid - Parse typeid( something ).
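// [Editorial illustration -- not part of the original header; fenced with
// #if 0.] The two operand forms ActOnCXXTypeid (below) parses:
#if 0
#include <string>
#include <typeinfo>
const std::type_info &t1 = typeid(std::string); // type operand
extern std::string s;
const std::type_info &t2 = typeid(s);           // expression operand
#endif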
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
///
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
                         bool BuildAndDiagnose = true,
                         const unsigned *const FunctionScopeIndexToStopAt =
                             nullptr,
                         bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
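// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] The scoping rule behind AFS_Class/AFS_Global:
// a class-scope operator new is preferred, and '::new' forces the global
// one.
#if 0
#include <cstddef>
struct Pooled {
  static void *operator new(std::size_t n); // found in class scope
};
Pooled *a = new Pooled;   // uses Pooled::operator new
Pooled *b = ::new Pooled; // uses the global operator new
#endif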
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
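// [Editorial illustration -- not part of the original header; hypothetical
// names, fenced with #if 0.] A full-expression that needs an
// ExprWithCleanups: the temporaries' non-trivial destructors must run at
// the end of the full-expression.
#if 0
#include <string>
std::string greet() {
  return std::string("hi") + "!"; // temporaries destroyed at the end of
                                  // this full-expression
}
#endif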
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed as part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, bool IsConstexprSpecified); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda.
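///
/// For illustration, the explicit captures are those written in the
/// introducer, e.g. 'x' and 'r' in:
/// \code
///   auto l = [x, &r](int n) { return x + r + n; };
/// \endcode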
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in a placeholder body, and IR generation actually generates the real /// code for the function pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable.
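///
/// For illustration, this builds the node for literals such as:
/// \code
///   NSNumber *i = @42;
///   NSNumber *d = @4.2;
/// \endcode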
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, 
IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect.
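///
/// For illustration, the attribute is written as:
/// \code
///   struct [[clang::trivial_abi]] Handle {
///     Handle(const Handle &);
///     ~Handle();
///   };
/// \endcode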
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool 
IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation()); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
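///
/// For example (illustrative):
/// \code
///   template<typename T> struct S { S(T); };
///   S(const char *) -> S<std::string>;  // 'S' names a deduction guide here
/// \endcode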
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
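///
/// For example (illustrative), in a dependent context:
/// \code
///   template<typename T, typename U> struct A {
///     class T::template apply<U> *p;
///   };
/// \endcode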
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, 
SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
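///
/// For example (illustrative):
/// \code
///   template<typename MetaFun, typename T1, typename T2> struct apply2 {
///     typedef typename MetaFun::template apply<T1, T2> type;
///   };
/// \endcode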
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise.
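///
/// For illustration, the diagnosed situation looks like:
/// \code
///   template<typename ...Ts> struct X {
///     std::tuple<Ts> bad;    // error: unexpanded parameter pack 'Ts'
///     std::tuple<Ts...> ok;
///   };
/// \endcode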
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. 
/// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. 
/// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. 
TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call.
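///
/// For illustration (hypothetical values): deducing the call 'f(42)'
/// against 'template<typename T> void f(T)' records one entry with
/// OriginalParamType 'T', DecomposedParam 'false', ArgIdx '0', and
/// OriginalArgType 'int'.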
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
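///
/// For example (illustrative):
/// \code
///   template<typename T> struct S { S(T); };
///   S s(42);  // relies on the implicitly declared guide S(T) -> S<T>
/// \endcode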
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *&Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. 
As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information.
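// Editor's example (illustrative, not from the original header): selecting
// the I-th element of each expanded pack for the duration of a scope.
// `SemaRef` and `I` are hypothetical locals.
//
//   {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
//     // ... substitutions in this scope use pack element I ...
//   } // the previous index is restored here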
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
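// Editor's example (illustrative): the usual guard pattern when entering an
// instantiation. `SemaRef`, `PointOfInstantiation`, and `Entity` are
// hypothetical locals.
//
//   Sema::InstantiatingTemplate Inst(SemaRef, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return; // depth limit exceeded; an error was already produced
//   if (Inst.isAlreadyInstantiating())
//     return; // this specialization is already on the active stack
//   // ... perform the instantiation; the context is popped by ~InstantiatingTemplate.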
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
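// Editor's example (illustrative): building up the array; note that set()
// asserts that indices arrive in increasing order. `EPI` (an ExtProtoInfo),
// `NumParams`, and the attribute chosen are assumptions for the sketch.
//
//   Sema::ExtParameterInfoBuilder InfoBuilder;
//   InfoBuilder.set(2, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
//   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);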
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
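// Editor's example (illustrative): a typical substitution during
// instantiation; `SemaRef`, `Param`, and `TemplateArgs` are hypothetical.
//
//   QualType NewTy = SemaRef.SubstType(Param->getType(), TemplateArgs,
//                                      Param->getLocation(),
//                                      Param->getDeclName());
//   if (NewTy.isNull())
//     return true; // substitution failed (e.g., under SFINAE)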
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl 
*ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
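// Editor's note (illustrative): for a spelling such as NSArray<NSCopying>,
// actOnObjCTypeArgsOrProtocolQualifiers fills the protocols output list,
// while for NSArray<NSString *> it fills typeArgs instead; the result is
// then completed via actOnObjCTypeArgsAndProtocolQualifiers.
//
//   NSArray<NSCopying>  -> protocol qualifiers (protocols list filled)
//   NSArray<NSString *> -> type arguments      (typeArgs list filled)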
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
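// Editor's example (illustrative): classifying a message send that starts
// with an identifier before building it; all arguments are hypothetical.
//
//   ParsedType ReceiverType;
//   switch (SemaRef.getObjCMessageKind(S, Name, NameLoc, IsSuper,
//                                      HasTrailingDot, ReceiverType)) {
//   case Sema::ObjCSuperMessage:    /* -> ActOnSuperMessage(...)    */ break;
//   case Sema::ObjCInstanceMessage: /* -> ActOnInstanceMessage(...) */ break;
//   case Sema::ObjCClassMessage:    /* -> ActOnClassMessage(...)    */ break;
//   }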
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
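// Editor's note (illustrative mapping, assuming the usual parser driver; the
// exact arguments are simplified):
//
//   #pragma pack(push, 8) -> ActOnPragmaPack(Loc, PSK_Push_Set, "", AlignExpr)
//   #pragma ms_struct on  -> ActOnPragmaMSStruct(PMSST_ON)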
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
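// Editor's example (illustrative): querying the pragma state at some point;
// `SemaRef` is hypothetical.
//
//   if (SemaRef.getOptimizeOffPragmaLocation().isValid()) {
//     // "#pragma clang optimize off" is in effect at this point.
//   }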
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. 
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. 
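// Editor's example (illustrative): gating a type on an OpenCL extension and
// diagnosing later uses; `SemaRef`, `T`, and `DS` are hypothetical, and
// cl_khr_fp64 is just a representative extension name.
//
//   SemaRef.setOpenCLExtensionForType(T, "cl_khr_fp64");
//   // ... later, when the type is written in a declaration specifier ...
//   if (SemaRef.checkOpenCLDisabledTypeDeclSpec(DS, T))
//     return; // T requires a disabled extension; diagnostics were emitted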
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. 
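// Editor's example (illustrative): how a parser driver brackets clause
// analysis for an OpenMP directive; all arguments are hypothetical.
//
//   SemaRef.StartOpenMPDSABlock(OMPD_parallel, DirName, CurScope, Loc);
//   SemaRef.StartOpenMPClause(OMPC_private);
//   // ... build the clause ...
//   SemaRef.EndOpenMPClause();
//   StmtResult Dir = /* ActOnOpenMPExecutableDirective(...) */ StmtError();
//   SemaRef.EndOpenMPDSABlock(Dir.get());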
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return true if (un)supported features for the current target should be /// diagnosed if OpenMP (offloading) is enabled. bool shouldDiagnoseTargetSupportFromOpenMP() const { return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() || isInOpenMPTargetExecutionDirective(); } /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region.
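// Editor's example (illustrative): the captured-region bracketing around an
// executable directive; `SemaRef`, `CurScope`, `AStmt` (a StmtResult), and
// `Clauses` are hypothetical.
//
//   SemaRef.ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
//   // ... parse the associated statement into AStmt ...
//   StmtResult Captured = SemaRef.ActOnOpenMPRegionEnd(AStmt, Clauses);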
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. 
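  ///
  /// For instance (illustrative source only):
  ///
  ///   #pragma omp task firstprivate(x)
  ///   process(x);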
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. 
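  ///
  /// For instance (illustrative source only):
  ///
  ///   #pragma omp cancel for
  ///
  /// arrives here with \p CancelRegion naming the enclosing construct kind
  /// (OMPD_for in this sketch).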
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. 
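  ///
  /// For instance (illustrative source only):
  ///
  ///   #pragma omp teams distribute parallel for simd
  ///   for (int i = 0; i < N; ++i)
  ///     a[i] += b[i];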
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. 
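  ///
  /// For instance (illustrative source only):
  ///
  ///   #pragma omp parallel num_threads(4)
  ///
  /// arrives here with \p NumThreads holding the expression '4'.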
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. 
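  ///
  /// For instance (illustrative source only):
  ///
  ///   #pragma omp atomic capture
  ///   { v = x; x += 1; }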
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc,
      OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
      SourceLocation DepLinMapLoc);
  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'task_reduction' clause.
  OMPClause *ActOnOpenMPTaskReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'in_reduction' clause.
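  ///
  /// For instance (illustrative source only; 'sum' and 'f' are hypothetical):
  ///
  ///   #pragma omp task in_reduction(+: sum)
  ///   sum += f();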
OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'from' clause. 
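  ///
  /// For instance (illustrative source only):
  ///
  ///   #pragma omp target update from(a[0:n])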
OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'use_device_ptr' clause.
  OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed 'is_device_ptr' clause.
  OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// The result's value kind is given by \p VK.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                  = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
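  ///
  /// For instance (illustrative only): given 'struct S { int m; };', the
  /// member access 'S().m' requires the 'S()' prvalue to be materialized
  /// into a temporary object first.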
ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
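    ///
    /// For example (illustrative only): in C, given 'const char *CS',
    /// the assignment 'char *S = CS;' drops the 'const' qualifier and is
    /// classified with this value.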
CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. 
/// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
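  //
  // A sketch of that convention (illustrative only):
  //   a = b;   -> CheckAssignmentOperands(a, b, Loc, /*CompoundType=*/QualType());
  //   a += b;  -> CheckAssignmentOperands(a, b, Loc, /*CompoundType=*/PromotedTy);
  // where 'PromotedTy' stands for the computed compound-assignment type.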
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
// Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size,
  // or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
  ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                          QualType castType, Expr *&op,
                                          CheckedConversionKind CCK,
                                          bool Diagnose = true,
                                          bool DiagnoseCFAudited = false,
                                          BinaryOperatorKind Opc = BO_PtrMemD);

  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);

  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);

  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);

  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                                 MultiExprArg Args, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage, SourceLocation lbrac,
                                 SourceLocation rbrac, SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);

  /// Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
                                    ObjCMethodDecl *Method, bool isClassMessage,
                                    bool isSuperMessage);

  /// If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);

  /// Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.
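  ///
  /// For instance (illustrative Objective-C only):
  ///
  ///   - (instancetype)init;   // 'init' has a related result type
  ///
  /// Returning an unrelated pointer type from such a method triggers this
  /// note.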
void EmitRelatedResultTypeNoteForReturn(QualType destType);

  class ConditionResult {
    Decl *ConditionVar;
    FullExprArg Condition;
    bool Invalid;
    bool HasKnownValue;
    bool KnownValue;

    friend class Sema;
    ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                    bool IsConstexpr)
        : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
          HasKnownValue(IsConstexpr && Condition.get() &&
                        !Condition.get()->isValueDependent()),
          KnownValue(HasKnownValue &&
                     !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
    explicit ConditionResult(bool Invalid)
        : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
          HasKnownValue(false), KnownValue(false) {}

  public:
    ConditionResult() : ConditionResult(false) {}
    bool isInvalid() const { return Invalid; }
    std::pair<VarDecl *, Expr *> get() const {
      return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                            Condition.get());
    }
    llvm::Optional<bool> getKnownValue() const {
      if (!HasKnownValue)
        return None;
      return KnownValue;
    }
  };
  static ConditionResult ConditionError() { return ConditionResult(true); }

  enum class ConditionKind {
    Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
    ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
    Switch       ///< An integral condition for a 'switch' statement.
  };

  ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                                 Expr *SubExpr, ConditionKind CK);

  ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                         SourceLocation StmtLoc,
                                         ConditionKind CK);

  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc,
                                    ConditionKind CK);
  ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return the converted expression, or ExprError() if there were any
  /// errors.
  ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                   bool IsConstexpr = false);

  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);

  /// Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

  /// CheckCXXBooleanCondition - Returns the expression converted to bool, or
  /// ExprError() if the conversion is invalid.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);

  /// Checks that the Objective-C declaration is declared in the global scope.
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);

  /// Abstract base class used for diagnosing integer constant
  /// expression violations.
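  ///
  /// A minimal sketch of assumed usage (hypothetical subclass, not code from
  /// this file):
  ///
  ///   struct MyICEDiagnoser : VerifyICEDiagnoser {
  ///     void diagnoseNotICE(Sema &S, SourceLocation Loc,
  ///                         SourceRange SR) override {
  ///       S.Diag(Loc, diag::err_expr_not_ice) << S.LangOpts.CPlusPlus << SR;
  ///     }
  ///   } Diagnoser;
  ///   VerifyIntegerConstantExpression(E, &Value, Diagnoser);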
class VerifyICEDiagnoser {
  public:
    bool Suppress;

    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                                SourceRange SR) = 0;
    virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
    virtual ~VerifyICEDiagnoser() { }
  };

  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns false on success.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr);

  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns false on success.
  /// Can optionally return whether the bit-field is of width 0.
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      CUDADeferredDiags;

  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
  /// key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
  /// same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      CUDAKnownEmittedFns;

  /// A partial call graph maintained during CUDA compilation to support
  /// deferred diagnostics.
  ///
  /// Functions are only added here if, at the time they're considered, they
  /// are not known-emitted. As soon as we discover that a function is
  /// known-emitted, we remove it and everything it transitively calls from
  /// this set and add those functions to CUDAKnownEmittedFns.
  llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                                 SourceLocation>>
      CUDACallGraph;

  /// Diagnostic builder for CUDA errors which may or may not be deferred.
  ///
  /// In CUDA, there exist constructs (e.g.
variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class CUDADiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); ~CUDADiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (CUDADiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a CUDADiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiag.hasValue()) *Diag.PartialDiag << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<PartialDiagnostic> PartialDiag; }; /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. 
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong. Returns CFT_Host if D is null.
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentCUDATarget() {
    return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
  }

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);

  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes. Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls. These may appear in
  /// semantically correct CUDA programs, but only if they're never codegen'ed.
  bool IsAllowedCUDACall(const FunctionDecl *Caller,
                         const FunctionDecl *Callee) {
    return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
  }

  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                   const LookupResult &Previous);

public:
  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
  ///   be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating duplicate
  ///   deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

  /// Set __device__ or __host__ __device__ attributes on the given lambda
  /// operator() method.
  ///
  /// CUDA lambdas declared inside __device__ or __global__ functions inherit
  /// the __device__ attribute. Similarly, lambdas inside __host__ __device__
  /// functions become __host__ __device__ themselves.
  void CUDASetLambdaAttrs(CXXMethodDecl *Method);

  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedCUDAMatches(
      const FunctionDecl *Caller,
      SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteBinaryRHS(Scope *S, Expr *LHS, tok::TokenKind Op); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, 
SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? 
OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDefaultedMemberExceptionSpecs.empty() && "there shouldn't be any pending delayed defaulted member " "exception specs"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDefaultedMemberExceptionSpecs) SavedDefaultedMemberExceptionSpecs; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDefaultedMemberExceptionSpecs.swap( S.DelayedDefaultedMemberExceptionSpecs); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. 
This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
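// ---------------------------------------------------------------------------
// Illustrative sketch (not part of Sema.h): the pruning idiom behind
// EraseUnwantedCUDAMatches. Because CUDAFunctionPreference is ordered
// numerically from worst to best, "erase all functions with lower calling
// priority" reduces to one max_element scan followed by remove_if. The types
// below (Candidate, the Preference enum values) are hypothetical stand-ins
// for clang's DeclAccessPair / FunctionDecl machinery, not clang API.
// ---------------------------------------------------------------------------
// #include <algorithm>
// #include <vector>
//
// enum Preference { Never, WrongSide, HostDevice, SameSide, Native };
//
// struct Candidate {
//   int id;
//   Preference pref;  // would come from IdentifyCUDAPreference(Caller, Callee)
// };
//
// inline void eraseUnwantedMatches(std::vector<Candidate> *matches) {
//   if (matches->empty())
//     return;
//   // Find the best preference among all candidates...
//   Preference best =
//       std::max_element(matches->begin(), matches->end(),
//                        [](const Candidate &a, const Candidate &b) {
//                          return a.pref < b.pref;
//                        })
//           ->pref;
//   // ...and drop every candidate that does not reach it.
//   matches->erase(std::remove_if(matches->begin(), matches->end(),
//                                 [best](const Candidate &c) {
//                                   return c.pref < best;
//                                 }),
//                  matches->end());
// }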
uniform_grid_environment.h
// ----------------------------------------------------------------------------- // // Copyright (C) The BioDynaMo Project. // All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_ #define CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_ #include <assert.h> #include <omp.h> #include <algorithm> #include <array> #include <atomic> #include <cmath> #include <iostream> #include <limits> #include <memory> #include <mutex> #ifdef LINUX #include <parallel/algorithm> #endif // LINUX #include <utility> #include <vector> #include <morton/morton.h> #include "core/container/fixed_size_vector.h" #include "core/container/inline_vector.h" #include "core/container/math_array.h" #include "core/container/parallel_resize_vector.h" #include "core/container/sim_object_vector.h" #include "core/environment/environment.h" #include "core/functor.h" #include "core/param/param.h" #include "core/resource_manager.h" #include "core/util/log.h" #include "core/util/spinlock.h" namespace bdm { /// A class that represents Cartesian 3D grid class UniformGridEnvironment : public Environment { // DisplacementOpCuda needs access to some UniformGridEnvironment private // members to reconstruct // the grid on GPU (same for DisplacementOpOpenCL) friend struct DisplacementOpCuda; friend struct DisplacementOpOpenCL; public: /// A single unit cube of the grid struct Box { Spinlock lock_; // std::atomic<bool> timestamp_; uint32_t timestamp_; /// start value of the linked list of simulation objects inside this box. /// Next element can be found at `successors_[start_]` SoHandle start_; /// length of the linked list (i.e. 
number of simulation objects) /// uint64_t, because sizeof(Box) = 16, for uint16_t and uint64_t uint16_t length_; Box() : timestamp_(0), start_(SoHandle()), length_(0) {} /// Copy Constructor required for boxes_.resize() /// Since box values will be overwritten afterwards it forwards to the /// default ctor Box(const Box& other) : Box() {} Box& operator=(const Box& other) { // start_ = other.start_.load(std::memory_order_relaxed); // length_ = other.length_.load(std::memory_order_relaxed); start_ = other.start_; length_ = other.length_; return *this; } bool IsEmpty(uint64_t grid_timestamp) const { return grid_timestamp != timestamp_; } /// @brief Adds a simulation object to this box /// /// @param[in] so The object's identifier /// @param AddObject successors The successors void AddObject(SoHandle soh, SimObjectVector<SoHandle>* successors, UniformGridEnvironment* grid) { std::lock_guard<Spinlock> lock_guard(lock_); if (timestamp_ != grid->timestamp_) { timestamp_ = grid->timestamp_; length_ = 1; start_ = soh; } else { length_++; (*successors)[soh] = start_; start_ = soh; } } /// An iterator that iterates over the cells in this box struct Iterator { Iterator(UniformGridEnvironment* grid, const Box* box) : grid_(grid), current_value_(box->start_), countdown_(box->length_) { if (grid->timestamp_ != box->timestamp_) { countdown_ = 0; } } bool IsAtEnd() { return countdown_ <= 0; } Iterator& operator++() { countdown_--; if (countdown_ > 0) { current_value_ = grid_->successors_[current_value_]; } return *this; } SoHandle operator*() const { return current_value_; } /// Pointer to the neighbor grid; for accessing the successor_ list UniformGridEnvironment* grid_; /// The current simulation object to be considered SoHandle current_value_; /// The remain number of simulation objects to consider int countdown_ = 0; }; Iterator begin() const { // NOLINT auto* grid = static_cast<UniformGridEnvironment*>( Simulation::GetActive()->GetEnvironment()); return Iterator(grid, this); } }; /// An iterator that iterates over the boxes in this grid struct NeighborIterator { explicit NeighborIterator( const FixedSizeVector<const Box*, 27>& neighbor_boxes, uint64_t grid_timestamp) : neighbor_boxes_(neighbor_boxes), // start iterator from box 0 box_iterator_(neighbor_boxes_[0]->begin()), grid_timestamp_(grid_timestamp) { // if first box is empty if (neighbor_boxes_[0]->IsEmpty(grid_timestamp)) { ForwardToNonEmptyBox(grid_timestamp); } } bool IsAtEnd() const { return is_end_; } SoHandle operator*() const { return *box_iterator_; } /// Version where empty neighbor boxes are allowed NeighborIterator& operator++() { ++box_iterator_; // if iterator of current box has come to an end, continue with next box if (box_iterator_.IsAtEnd()) { return ForwardToNonEmptyBox(grid_timestamp_); } return *this; } private: /// The 27 neighbor boxes that will be searched for simulation objects const FixedSizeVector<const Box*, 27>& neighbor_boxes_; /// The box that shall be considered to iterate over for finding simulation /// objects typename Box::Iterator box_iterator_; uint64_t grid_timestamp_; /// The id of the box to be considered (i.e. 
value between 0 - 26) uint16_t box_idx_ = 0; /// Flag to indicate that all the neighbor boxes have been searched through bool is_end_ = false; /// Forwards the iterator to the next non empty box and returns itself /// If there are no non empty boxes is_end_ is set to true NeighborIterator& ForwardToNonEmptyBox(uint64_t grid_timestamp) { // increment box id until non empty box has been found while (++box_idx_ < neighbor_boxes_.size()) { // box is empty or uninitialized (padding box) -> continue if (neighbor_boxes_[box_idx_]->IsEmpty(grid_timestamp)) { continue; } // a non-empty box has been found box_iterator_ = neighbor_boxes_[box_idx_]->begin(); return *this; } // all remaining boxes have been empty; reached end is_end_ = true; return *this; } }; /// Enum that determines the degree of adjacency in search neighbor boxes // todo(ahmad): currently only kHigh is supported (hardcoded 26 several // places) enum Adjacency { kLow, /**< The closest 8 neighboring boxes */ kMedium, /**< The closest 18 neighboring boxes */ kHigh /**< The closest 26 neighboring boxes */ }; UniformGridEnvironment(Adjacency adjacency = kHigh) : adjacency_(adjacency) {} UniformGridEnvironment(UniformGridEnvironment const&) = delete; void operator=(UniformGridEnvironment const&) = delete; virtual ~UniformGridEnvironment() {} /// Clears the grid void Clear() override { box_length_ = 1; largest_object_size_ = 0; num_boxes_axis_ = {{0}}; num_boxes_xy_ = 0; int32_t inf = std::numeric_limits<int32_t>::max(); grid_dimensions_ = {inf, -inf, inf, -inf, inf, -inf}; threshold_dimensions_ = {inf, -inf}; successors_.clear(); has_grown_ = false; } struct AssignToBoxesFunctor : public Functor<void, SimObject*, SoHandle> { AssignToBoxesFunctor(UniformGridEnvironment* grid) : grid_(grid) {} void operator()(SimObject* sim_object, SoHandle soh) override { const auto& position = sim_object->GetPosition(); auto idx = grid_->GetBoxIndex(position); auto box = grid_->GetBoxPointer(idx); box->AddObject(soh, &(grid_->successors_), grid_); sim_object->SetBoxIdx(idx); } private: UniformGridEnvironment* grid_ = nullptr; }; /// Updates the grid, as simulation objects may have moved, added or deleted void Update() override { auto* rm = Simulation::GetActive()->GetResourceManager(); if (rm->GetNumSimObjects() != 0) { Clear(); timestamp_++; auto inf = Math::kInfinity; std::array<double, 6> tmp_dim = {{inf, -inf, inf, -inf, inf, -inf}}; CalcSimDimensionsAndLargestSimObject(&tmp_dim, &largest_object_size_); RoundOffGridDimensions(tmp_dim); auto los = ceil(largest_object_size_); assert(los > 0 && "The largest object size was found to be 0. 
Please check if your " "cells are correctly initialized."); box_length_ = los; for (int i = 0; i < 3; i++) { int dimension_length = grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i]; int r = dimension_length % box_length_; // If the grid is not perfectly divisible along each dimension by the // resolution, extend the grid so that it is if (r != 0) { // std::abs for the case that box_length_ > dimension_length grid_dimensions_[2 * i + 1] += (box_length_ - r); } else { // Else extend the grid dimension with one row, because the outmost // object lies exactly on the border grid_dimensions_[2 * i + 1] += box_length_; } } // Pad the grid to avoid out of bounds check when search neighbors for (int i = 0; i < 3; i++) { grid_dimensions_[2 * i] -= box_length_; grid_dimensions_[2 * i + 1] += box_length_; } // Calculate how many boxes fit along each dimension for (int i = 0; i < 3; i++) { int dimension_length = grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i]; assert((dimension_length % box_length_ == 0) && "The grid dimensions are not a multiple of its box length"); num_boxes_axis_[i] = dimension_length / box_length_; } num_boxes_xy_ = num_boxes_axis_[0] * num_boxes_axis_[1]; auto total_num_boxes = num_boxes_xy_ * num_boxes_axis_[2]; CheckGridGrowth(); // resize boxes_ if (boxes_.size() != total_num_boxes) { if (boxes_.capacity() < total_num_boxes) { boxes_.reserve(total_num_boxes * 2); } boxes_.resize(total_num_boxes); } successors_.reserve(); // Assign simulation objects to boxes AssignToBoxesFunctor functor(this); rm->ApplyOnAllElementsParallelDynamic(1000, functor); auto* param = Simulation::GetActive()->GetParam(); if (param->bound_space_) { int min = param->min_bound_; int max = param->max_bound_; threshold_dimensions_ = {min, max}; } if (param->thread_safety_mechanism_ == Param::ThreadSafetyMechanism::kAutomatic) { nb_mutex_builder_->Update(); } } else { // There are no sim objects in this simulation auto* param = Simulation::GetActive()->GetParam(); bool uninitialized = boxes_.size() == 0; if (uninitialized && param->bound_space_) { // Simulation has never had any simulation objects // Initialize grid dimensions with `Param::min_bound_` and // `Param::max_bound_` // This is required for the DiffusionGrid int min = param->min_bound_; int max = param->max_bound_; grid_dimensions_ = {min, max, min, max, min, max}; threshold_dimensions_ = {min, max}; has_grown_ = true; } else if (!uninitialized) { // all simulation objects have been removed in the last iteration // grid state remains the same, but we have to set has_grown_ to false // otherwise the DiffusionGrid will attempt to resize has_grown_ = false; } else { Log::Fatal( "UniformGridEnvironment", "You tried to initialize an empty simulation without bound space. " "Therefore we cannot determine the size of the simulation space. 
" "Please add simulation objects, or set Param::bound_space_, " "Param::min_bound_, and Param::max_bound_."); } } } /// @brief Calculates the squared euclidian distance between two points /// in 3D /// /// @param[in] pos1 Position of the first point /// @param[in] pos2 Position of the second point /// /// @return The distance between the two points /// inline double SquaredEuclideanDistance(const Double3& pos1, const Double3& pos2) const { const double dx = pos2[0] - pos1[0]; const double dy = pos2[1] - pos1[1]; const double dz = pos2[2] - pos1[2]; return (dx * dx + dy * dy + dz * dz); } inline bool WithinSquaredEuclideanDistance(double squared_radius, const Double3& pos1, const Double3& pos2) const { const double dx = pos2[0] - pos1[0]; const double dx2 = dx * dx; if (dx2 > squared_radius) { return false; } const double dy = pos2[1] - pos1[1]; const double dy2_plus_dx2 = dy * dy + dx2; if (dy2_plus_dx2 > squared_radius) { return false; } const double dz = pos2[2] - pos1[2]; const double distance = dz * dz + dy2_plus_dx2; return distance < squared_radius; } void UpdateBoxZOrder() { // iterate boxes in Z-order / morton order // TODO(lukas) this is a very quick attempt to test an idea // improve performance of this brute force solution zorder_sorted_boxes_.resize(boxes_.size()); const uint32_t nx = num_boxes_axis_[0]; const uint32_t ny = num_boxes_axis_[1]; const uint32_t nz = num_boxes_axis_[2]; #pragma omp parallel for collapse(3) for (uint32_t x = 0; x < nx; x++) { for (uint32_t y = 0; y < ny; y++) { for (uint32_t z = 0; z < nz; z++) { auto box_idx = GetBoxIndex(std::array<uint32_t, 3>{x, y, z}); auto morton = libmorton::morton3D_64_encode(x, y, z); zorder_sorted_boxes_[box_idx] = std::pair<uint32_t, const Box*>{morton, &boxes_[box_idx]}; } } } #ifdef LINUX __gnu_parallel::sort( zorder_sorted_boxes_.begin(), zorder_sorted_boxes_.end(), [](const auto& lhs, const auto& rhs) { return lhs.first < rhs.first; }); #else std::sort( zorder_sorted_boxes_.begin(), zorder_sorted_boxes_.end(), [](const auto& lhs, const auto& rhs) { return lhs.first < rhs.first; }); #endif // LINUX } /// This method iterates over all elements. Iteration is performed in /// Z-order of boxes. There is no particular order for elements inside a box. void IterateZOrder(Functor<void, const SoHandle&>& callback) override { UpdateBoxZOrder(); for (uint64_t i = 0; i < zorder_sorted_boxes_.size(); i++) { auto it = zorder_sorted_boxes_[i].second->begin(); while (!it.IsAtEnd()) { callback(*it); ++it; } } } /// @brief Applies the given lambda to each neighbor /// /// @param[in] lambda The operation as a lambda /// @param query The query object void ForEachNeighbor(const std::function<void(const SimObject*)>& lambda, const SimObject& query) const { auto idx = query.GetBoxIdx(); FixedSizeVector<const Box*, 27> neighbor_boxes; GetMooreBoxes(&neighbor_boxes, idx); auto* rm = Simulation::GetActive()->GetResourceManager(); NeighborIterator ni(neighbor_boxes, timestamp_); while (!ni.IsAtEnd()) { auto* sim_object = rm->GetSimObjectWithSoHandle(*ni); if (sim_object != &query) { lambda(sim_object); } ++ni; } } /// @brief Applies the given lambda to each neighbor or the specified /// simulation object. /// /// In simulation code do not use this function directly. Use the same /// function from the exeuction context (e.g. 
`InPlaceExecutionContext`) /// /// @param[in] lambda The operation as a lambda /// @param query The query object /// void ForEachNeighbor(Functor<void, const SimObject*, double>& lambda, const SimObject& query) override { const auto& position = query.GetPosition(); auto idx = query.GetBoxIdx(); FixedSizeVector<const Box*, 27> neighbor_boxes; GetMooreBoxes(&neighbor_boxes, idx); auto* rm = Simulation::GetActive()->GetResourceManager(); NeighborIterator ni(neighbor_boxes, timestamp_); const unsigned batch_size = 64; uint64_t size = 0; SimObject* sim_objects[batch_size] __attribute__((aligned(64))); double x[batch_size] __attribute__((aligned(64))); double y[batch_size] __attribute__((aligned(64))); double z[batch_size] __attribute__((aligned(64))); double squared_distance[batch_size] __attribute__((aligned(64))); auto process_batch = [&]() { #pragma omp simd for (uint64_t i = 0; i < size; ++i) { const double dx = x[i] - position[0]; const double dy = y[i] - position[1]; const double dz = z[i] - position[2]; squared_distance[i] = dx * dx + dy * dy + dz * dz; } for (uint64_t i = 0; i < size; ++i) { lambda(sim_objects[i], squared_distance[i]); } size = 0; }; while (!ni.IsAtEnd()) { auto soh = *ni; // increment iterator already here to hide memory latency ++ni; auto* sim_object = rm->GetSimObjectWithSoHandle(soh); if (sim_object != &query) { sim_objects[size] = sim_object; const auto& pos = sim_object->GetPosition(); x[size] = pos[0]; y[size] = pos[1]; z[size] = pos[2]; size++; if (size == batch_size) { process_batch(); } } } process_batch(); } /// @brief Applies the given lambda to each neighbor or the specified /// simulation object. /// /// In simulation code do not use this function directly. Use the same /// function from the exeuction context (e.g. `InPlaceExecutionContext`) /// /// @param[in] lambda The operation as a lambda /// @param query The query object /// @param[in] squared_radius The search radius squared /// void ForEachNeighborWithinRadius( const std::function<void(const SimObject*)>& lambda, const SimObject& query, double squared_radius) { const auto& position = query.GetPosition(); auto idx = query.GetBoxIdx(); FixedSizeVector<const Box*, 27> neighbor_boxes; GetMooreBoxes(&neighbor_boxes, idx); auto* rm = Simulation::GetActive()->GetResourceManager(); NeighborIterator ni(neighbor_boxes, timestamp_); while (!ni.IsAtEnd()) { // Do something with neighbor object auto* sim_object = rm->GetSimObjectWithSoHandle(*ni); if (sim_object != &query) { const auto& neighbor_position = sim_object->GetPosition(); if (this->WithinSquaredEuclideanDistance(squared_radius, position, neighbor_position)) { lambda(sim_object); } } ++ni; } } /// @brief Return the box index in the one dimensional array of the box /// that contains the position /// /// @param[in] position The position of the object /// /// @return The box index. 
/// size_t GetBoxIndex(const Double3& position) const { std::array<uint32_t, 3> box_coord; box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_; box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_; box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_; return GetBoxIndex(box_coord); } /// Gets the size of the largest object in the grid double GetLargestObjectSize() const override { return largest_object_size_; } const std::array<int32_t, 6>& GetDimensions() const override { return grid_dimensions_; } const std::array<int32_t, 2>& GetDimensionThresholds() const override { return threshold_dimensions_; } uint64_t GetNumBoxes() const { return boxes_.size(); } uint32_t GetBoxLength() { return box_length_; } std::array<uint32_t, 3> GetBoxCoordinates(size_t box_idx) const { std::array<uint32_t, 3> box_coord; box_coord[2] = box_idx / num_boxes_xy_; auto remainder = box_idx % num_boxes_xy_; box_coord[1] = remainder / num_boxes_axis_[0]; box_coord[0] = remainder % num_boxes_axis_[0]; return box_coord; } /// @brief Gets the information about the grid /// /// @param box_length The grid's box length /// @param num_boxes_axis The number boxes along each axis of the grid /// @param grid_dimensions The grid's dimensions /// /// @tparam TUint32 A uint32 type (could also be cl_uint) /// @tparam TInt32 A int32 type (could be cl_int) /// template <typename TUint32, typename TInt32> void GetGridInfo(TUint32* box_length, std::array<TUint32, 3>* num_boxes_axis, std::array<TInt32, 3>* grid_dimensions) { box_length[0] = box_length_; (*num_boxes_axis)[0] = num_boxes_axis_[0]; (*num_boxes_axis)[1] = num_boxes_axis_[1]; (*num_boxes_axis)[2] = num_boxes_axis_[2]; (*grid_dimensions)[0] = grid_dimensions_[0]; (*grid_dimensions)[1] = grid_dimensions_[2]; (*grid_dimensions)[2] = grid_dimensions_[4]; } // NeighborMutex --------------------------------------------------------- /// This class ensures thread-safety for the InPlaceExecutionContext for the /// case /// that a simulation object modifies its neighbors. class GridNeighborMutexBuilder : public Environment::NeighborMutexBuilder { public: /// The NeighborMutex class is a synchronization primitive that can be /// used to protect sim_objects data from being simultaneously accessed by /// multiple threads. class GridNeighborMutex : public Environment::NeighborMutexBuilder::NeighborMutex { public: GridNeighborMutex(const FixedSizeVector<uint64_t, 27>& mutex_indices, GridNeighborMutexBuilder* mutex_builder) : mutex_indices_(mutex_indices), mutex_builder_(mutex_builder) { // Deadlocks occur if mutliple threads try to acquire the same locks, // but in different order. // -> sort to avoid deadlocks - see lock ordering std::sort(mutex_indices_.begin(), mutex_indices_.end()); } virtual ~GridNeighborMutex() {} void lock() override { // NOLINT for (auto idx : mutex_indices_) { auto& mutex = mutex_builder_->mutexes_[idx].mutex_; // acquire lock (and spin if another thread is holding it) while (mutex.test_and_set(std::memory_order_acquire)) { } } } void unlock() override { // NOLINT for (auto idx : mutex_indices_) { auto& mutex = mutex_builder_->mutexes_[idx].mutex_; mutex.clear(std::memory_order_release); } } void SetMutexIndices(const FixedSizeVector<uint64_t, 27>& indices) { mutex_indices_ = indices; std::sort(mutex_indices_.begin(), mutex_indices_.end()); } private: FixedSizeVector<uint64_t, 27> mutex_indices_; GridNeighborMutexBuilder* mutex_builder_; }; /// Used to store mutexes in a vector. 
/// Always creates a new mutex (even for the copy constructor) struct MutexWrapper { MutexWrapper() {} MutexWrapper(const MutexWrapper&) {} std::atomic_flag mutex_ = ATOMIC_FLAG_INIT; }; virtual ~GridNeighborMutexBuilder() {} void Update() { auto* grid = static_cast<UniformGridEnvironment*>( Simulation::GetActive()->GetEnvironment()); mutexes_.resize(grid->GetNumBoxes()); } NeighborMutex* GetMutex(uint64_t box_idx) override { auto* grid = static_cast<UniformGridEnvironment*>( Simulation::GetActive()->GetEnvironment()); FixedSizeVector<uint64_t, 27> box_indices; grid->GetMooreBoxIndices(&box_indices, box_idx); thread_local GridNeighborMutex* mutex = new GridNeighborMutex(box_indices, this); mutex->SetMutexIndices(box_indices); return mutex; } private: /// one mutex for each box in `UniformGridEnvironment::boxes_` std::vector<MutexWrapper> mutexes_; }; /// Returns the `NeighborMutexBuilder`. The client use it to create a /// `NeighborMutex`. NeighborMutexBuilder* GetNeighborMutexBuilder() override { return nb_mutex_builder_.get(); } private: /// The vector containing all the boxes in the grid /// Using parallel resize vector to enable parallel initialization and thus /// better scalability. ParallelResizeVector<Box> boxes_; /// is incremented at each call to Update /// This is used to decide if boxes should be reinitialized uint32_t timestamp_ = 0; /// Length of a Box uint32_t box_length_ = 1; /// Stores the number of boxes for each axis std::array<uint32_t, 3> num_boxes_axis_ = {{0}}; /// Number of boxes in the xy plane (=num_boxes_axis_[0] * num_boxes_axis_[1]) size_t num_boxes_xy_ = 0; /// Implements linked list - array index = key, value: next element /// /// // Usage /// SoHandle current_element = ...; /// SoHandle next_element = successors_[current_element]; SimObjectVector<SoHandle> successors_; /// Determines which boxes to search neighbors in (see enum Adjacency) Adjacency adjacency_; /// The size of the largest object in the simulation double largest_object_size_ = 0; /// Cube which contains all simulation objects /// {x_min, x_max, y_min, y_max, z_min, z_max} std::array<int32_t, 6> grid_dimensions_; /// Stores the min / max dimension value that need to be surpassed in order /// to trigger a diffusion grid change std::array<int32_t, 2> threshold_dimensions_; /// stores pairs of <box morton code, box pointer> sorted by morton code. ParallelResizeVector<std::pair<uint32_t, const Box*>> zorder_sorted_boxes_; /// Holds instance of NeighborMutexBuilder. /// NeighborMutexBuilder is updated if `Param::thread_safety_mechanism_` /// is set to `kAutomatic` std::unique_ptr<GridNeighborMutexBuilder> nb_mutex_builder_ = std::make_unique<GridNeighborMutexBuilder>(); void CheckGridGrowth() { // Determine if the grid dimensions have changed (changed in the sense that // the grid has grown outwards) auto min_gd = *std::min_element(grid_dimensions_.begin(), grid_dimensions_.end()); auto max_gd = *std::max_element(grid_dimensions_.begin(), grid_dimensions_.end()); if (min_gd < threshold_dimensions_[0]) { threshold_dimensions_[0] = min_gd; has_grown_ = true; } if (max_gd > threshold_dimensions_[1]) { Log::Info("UniformGridEnvironment", "Your simulation objects are getting near the edge of " "the simulation space. 
Be aware of boundary conditions that " "may come into play!"); threshold_dimensions_[1] = max_gd; has_grown_ = true; } } void RoundOffGridDimensions(const std::array<double, 6>& grid_dimensions) { assert(grid_dimensions_[0] > -9.999999999); assert(grid_dimensions_[2] > -9.999999999); assert(grid_dimensions_[4] > -9.999999999); assert(grid_dimensions_[1] < 80); assert(grid_dimensions_[3] < 80); assert(grid_dimensions_[5] < 80); grid_dimensions_[0] = floor(grid_dimensions[0]); grid_dimensions_[2] = floor(grid_dimensions[2]); grid_dimensions_[4] = floor(grid_dimensions[4]); grid_dimensions_[1] = ceil(grid_dimensions[1]); grid_dimensions_[3] = ceil(grid_dimensions[3]); grid_dimensions_[5] = ceil(grid_dimensions[5]); } /// @brief Gets the Moore (i.e adjacent) boxes of the query boxAlso adds /// the /// query box. /// /// @param[out] neighbor_boxes The neighbor boxes /// @param[in] box_idx The query box /// void GetMooreBoxes(FixedSizeVector<const Box*, 27>* neighbor_boxes, size_t box_idx) const { neighbor_boxes->push_back(GetBoxPointer(box_idx)); // Adjacent 6 (top, down, left, right, front and back) if (adjacency_ >= kLow) { neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_)); neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_)); neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx - 1)); neighbor_boxes->push_back(GetBoxPointer(box_idx + 1)); } // Adjacent 12 if (adjacency_ >= kMedium) { neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_axis_[0] + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_axis_[0] + 1)); } // Adjacent 8 if (adjacency_ >= kHigh) { neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1)); } } /// @brief Gets the box indices of all adjacent boxes. Also adds the /// query box index. 
/// /// @param[out] box_indices Result containing all box indices /// @param[in] box_idx The query box /// void GetMooreBoxIndices(FixedSizeVector<uint64_t, 27>* box_indices, size_t box_idx) const { box_indices->push_back(box_idx); // Adjacent 6 (top, down, left, right, front and back) if (adjacency_ >= kLow) { box_indices->push_back(box_idx - num_boxes_xy_); box_indices->push_back(box_idx + num_boxes_xy_); box_indices->push_back(box_idx - num_boxes_axis_[0]); box_indices->push_back(box_idx + num_boxes_axis_[0]); box_indices->push_back(box_idx - 1); box_indices->push_back(box_idx + 1); } // Adjacent 12 if (adjacency_ >= kMedium) { box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0]); box_indices->push_back(box_idx - num_boxes_xy_ - 1); box_indices->push_back(box_idx - num_boxes_axis_[0] - 1); box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]); box_indices->push_back(box_idx + num_boxes_xy_ - 1); box_indices->push_back(box_idx + num_boxes_axis_[0] - 1); box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0]); box_indices->push_back(box_idx - num_boxes_xy_ + 1); box_indices->push_back(box_idx - num_boxes_axis_[0] + 1); box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]); box_indices->push_back(box_idx + num_boxes_xy_ + 1); box_indices->push_back(box_idx + num_boxes_axis_[0] + 1); } // Adjacent 8 if (adjacency_ >= kHigh) { box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1); box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1); box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1); box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1); box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1); box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1); box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1); box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1); } } /// Determines current box based on parameter box_idx and adds it together /// with half of the surrounding boxes to the vector. /// Legend: C = center, N = north, E = east, S = south, W = west, F = front, /// B = back /// For each box pair which is centro-symmetric only one box is taken -- /// e.g. 
E-W: E, or BNW-FSE: BNW /// /// (x-axis to the right \ y-axis up) /// z=1 /// +-----+----+-----+ /// | BNW | BN | BNE | /// +-----+----+-----+ /// | NW | N | NE | /// +-----+----+-----+ /// | FNW | FN | FNE | /// +-----+----+-----+ /// /// z = 0 /// +-----+----+-----+ /// | BW | B | BE | /// +-----+----+-----+ /// | W | C | E | /// +-----+----+-----+ /// | FW | F | FE | /// +-----+----+-----+ /// /// z = -1 /// +-----+----+-----+ /// | BSW | BS | BSE | /// +-----+----+-----+ /// | SW | S | SE | /// +-----+----+-----+ /// | FSW | FS | FSE | /// +-----+----+-----+ /// void GetHalfMooreBoxIndices(FixedSizeVector<size_t, 14>* neighbor_boxes, size_t box_idx) const { // C neighbor_boxes->push_back(box_idx); // BW neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] - 1); // FNW neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1); // NW neighbor_boxes->push_back(box_idx + num_boxes_xy_ - 1); // BNW neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1); // B neighbor_boxes->push_back(box_idx + num_boxes_axis_[0]); // FN neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]); // N neighbor_boxes->push_back(box_idx + num_boxes_xy_); // BN neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]); // E neighbor_boxes->push_back(box_idx + 1); // BE neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] + 1); // FNE neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1); // NE neighbor_boxes->push_back(box_idx + num_boxes_xy_ + 1); // BNE neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1); } /// @brief Gets the pointer to the box with the given index /// /// @param[in] index The index of the box /// /// @return The pointer to the box /// const Box* GetBoxPointer(size_t index) const { return &(boxes_[index]); } /// @brief Gets the pointer to the box with the given index /// /// @param[in] index The index of the box /// /// @return The pointer to the box /// Box* GetBoxPointer(size_t index) { return &(boxes_[index]); } /// Returns the box index in the one dimensional array based on box /// coordinates in space /// /// @param box_coord box coordinates in space (x, y, z) /// /// @return The box index. /// size_t GetBoxIndex(const std::array<uint32_t, 3>& box_coord) const { return box_coord[2] * num_boxes_xy_ + box_coord[1] * num_boxes_axis_[0] + box_coord[0]; } }; } // namespace bdm #endif // CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
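// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of the header above): the
// position -> box-index mapping used by UniformGridEnvironment::GetBoxIndex.
// The concrete values (grid minima, box length, boxes per axis) are made-up
// assumptions for the example; in the real class they come from Update(), and
// the minima correspond to grid_dimensions_[0], [2], and [4].
// ---------------------------------------------------------------------------
// #include <array>
// #include <cmath>
// #include <cstdint>
// #include <cstdio>
//
// int main() {
//   const std::array<int32_t, 3> grid_min = {-10, -10, -10};  // x/y/z minima
//   const uint32_t box_length = 20;            // >= largest object size
//   const std::array<uint32_t, 3> num_boxes_axis = {4, 4, 4};
//   const size_t num_boxes_xy = num_boxes_axis[0] * num_boxes_axis[1];
//
//   const std::array<double, 3> position = {15.3, -2.7, 33.0};
//
//   // Same arithmetic as GetBoxIndex(const Double3&):
//   std::array<uint32_t, 3> bc;
//   for (int i = 0; i < 3; i++) {
//     bc[i] = (std::floor(position[i]) - grid_min[i]) / box_length;
//   }
//   // Flatten (x, y, z) exactly as GetBoxIndex(const std::array<uint32_t,3>&)
//   size_t idx = bc[2] * num_boxes_xy + bc[1] * num_boxes_axis[0] + bc[0];
//   std::printf("box coord (%u, %u, %u) -> index %zu\n", (unsigned)bc[0],
//               (unsigned)bc[1], (unsigned)bc[2], idx);
//   return 0;
// }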
GB_unaryop__ainv_int16_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int16_fp64 // op(A') function: GB_tran__ainv_int16_fp64 // C type: int16_t // A type: double // cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16) // unaryop: cij = -aij #define GB_ATYPE \ double #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int16_t z ; GB_CAST_SIGNED(z,x,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int16_fp64 ( int16_t *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int16_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
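For orientation, a hedged sketch of the user-level call that dispatches into a kernel like GB_unop__ainv_int16_fp64 above: applying the built-in GrB_AINV_INT16 unary operator to a GrB_FP64 matrix, so each stored entry is typecast to int16_t and then negated, exactly the cast/op pairing in the macros above. The matrix size and the single entry are illustrative only.

#include <GraphBLAS.h>

int main(void)
{
    GrB_Matrix A, C ;
    GrB_init(GrB_NONBLOCKING) ;
    GrB_Matrix_new(&A, GrB_FP64, 4, 4) ;
    GrB_Matrix_new(&C, GrB_INT16, 4, 4) ;
    GrB_Matrix_setElement_FP64(A, 3.7, 0, 1) ;
    // C = -cast(A): 3.7 is cast to int16 (3), then negated (-3)
    GrB_Matrix_apply(C, NULL, NULL, GrB_AINV_INT16, A, NULL) ;
    GrB_Matrix_free(&A) ;
    GrB_Matrix_free(&C) ;
    GrB_finalize() ;
    return 0 ;
}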
high_life.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define N 2048
#define itera_max 2000
#define cores 1

int grid    [N][N];
int new_grid[N][N];

void inicia_grids_zero(){
    int i, j;
    // initialize both grids with zeros
    #pragma omp parallel for collapse(2)
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++){
            grid[i][j] = 0;
            new_grid[i][j] = 0;
        }
    }
}

void geracao_inicial(){
    //GLIDER
    int lin = 1, col = 1;
    grid[lin  ][col+1] = 1;
    grid[lin+1][col+2] = 1;
    grid[lin+2][col  ] = 1;
    grid[lin+2][col+1] = 1;
    grid[lin+2][col+2] = 1;

    //R-pentomino
    lin = 10; col = 30;
    grid[lin  ][col+1] = 1;
    grid[lin  ][col+2] = 1;
    grid[lin+1][col  ] = 1;
    grid[lin+1][col+1] = 1;
    grid[lin+2][col+1] = 1;
}

// Counts the 8 neighbors of cell (i, j) on a toroidal grid: each coordinate
// wraps modulo N at the borders, which is equivalent to the original
// case-by-case border handling. Cells are 0/1, so a plain sum is the count.
int getNeighbors(int table[N][N], int i, int j){
    int up    = (i + N - 1) % N;
    int down  = (i + 1) % N;
    int left  = (j + N - 1) % N;
    int right = (j + 1) % N;

    return table[up][j]      + table[down][j]
         + table[i][left]    + table[i][right]
         + table[up][left]   + table[up][right]
         + table[down][left] + table[down][right];
}

void game_of_life(){
    int i;
    int j;
    // apply the rules of the game of life (HighLife variant: B36/S23)
    #pragma omp parallel for collapse(2)
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++){
            int n = getNeighbors(grid, i, j);
            // live cells with fewer than 2 live neighbors die
            if (grid[i][j] == 1 && n < 2){
                new_grid[i][j] = 0;
            }
            // a live cell with 2 or 3 neighbors stays alive in the next generation
            else if (grid[i][j] == 1 && (n == 2 || n == 3)){
                new_grid[i][j] = 1;
            }
            // a live cell with 4 or more neighbors dies of overpopulation
            else if (grid[i][j] == 1 && n >= 4){
                new_grid[i][j] = 0;
            }
            // a dead cell with exactly 3 or 6 neighbors becomes alive (HighLife birth rule)
            else if (grid[i][j] == 0 && (n == 3 || n == 6)){
                new_grid[i][j] = 1;
            }
            // every other dead cell stays dead
            else {
                new_grid[i][j] = 0;
            }
        }
    }

    // copy the new generation into the current grid
    #pragma omp parallel for collapse(2)
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++){
            grid[i][j] = new_grid[i][j];
        }
    }
}

int count_LiveCells(){
    int i;
    int j;
    int cont = 0;
    #pragma omp parallel for collapse(2) reduction(+ : cont)
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++){
            if (grid[i][j] == 1){
                cont++;
            }
        }
    }
    return cont;
}

int main (){
    int i, j;
    int vida;
    int cont = 0;
    double start;
    double end;

    omp_set_num_threads(cores);

    inicia_grids_zero();
    geracao_inicial();

    start = omp_get_wtime();
    for (vida = 0; vida < itera_max; vida++){
        /* for (i = 0; i < N; i++){
            for (j = 0; j < N; j++){
                if (grid[i][j] == 1){
                    printf("\033[1;31m");
                    printf("%d", grid[i][j]);
                    printf("\033[0m");
                }
                else{
                    printf("%d", grid[i][j]);
                }
            }
            printf("\n");
        }*/
        //printf("VIVOS: %d\n", count_LiveCells());
        game_of_life();
        //getchar(); // make the loop wait for an Enter keypress
    }
    end = omp_get_wtime();

    cont = count_LiveCells();
    printf("VIVOS: %d\n", cont);
    printf("CORES: %d\n", cores);
    printf("TEMPO: %.2f\n", end - start);
    return 0;
}
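A possible follow-up micro-optimization, not part of the original program: since getNeighbors wraps both coordinates modulo N, the wrapped indices can be precomputed once and the modulo removed from the hot loop. The names prev_/next_ are hypothetical; init_wrap_tables() would be called once before the generation loop, and the 0/1 cell convention is the same as above.

static int prev_[N], next_[N];

static void init_wrap_tables(void){
    for (int i = 0; i < N; i++){
        prev_[i] = (i + N - 1) % N;  /* wrapped i-1 (or j-1) */
        next_[i] = (i + 1) % N;      /* wrapped i+1 (or j+1) */
    }
}

/* Drop-in equivalent of getNeighbors, with table lookups instead of % */
static int neighbors_fast(int t[N][N], int i, int j){
    return t[prev_[i]][j] + t[next_[i]][j]
         + t[i][prev_[j]] + t[i][next_[j]]
         + t[prev_[i]][prev_[j]] + t[prev_[i]][next_[j]]
         + t[next_[i]][prev_[j]] + t[next_[i]][next_[j]];
}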
test-zrocks.c
/* xZTL: Zone Translation Layer User-space Library * * Copyright 2019 Samsung Electronics * * Written by Ivan L. Picoli <i.picoli@samsung.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <omp.h> #include <stdint.h> #include <stdlib.h> #include <libzrocks.h> #include <xztl.h> #include "CUnit/Basic.h" /* Number of Objects */ #define TEST_N_BUFFERS 2 /* Number of random objects to read */ #define TEST_RANDOM_ID 2 /* Object Size */ #define TEST_BUFFER_SZ (1024 * 1024 * 16) /* 16 MB */ static uint8_t *wbuf[TEST_N_BUFFERS]; static uint8_t *rbuf[TEST_N_BUFFERS]; static const char **devname; static void cunit_zrocks_assert_ptr(char *fn, void *ptr) { CU_ASSERT((uint64_t) ptr != 0); if (!ptr) printf("\n %s: ptr %p\n", fn, ptr); } static void cunit_zrocks_assert_int(char *fn, uint64_t status) { CU_ASSERT(status == 0); if (status) printf("\n %s: %lx\n", fn, status); } static int cunit_zrocks_init(void) { return 0; } static int cunit_zrocks_exit(void) { return 0; } static void test_zrocks_init(void) { int ret; ret = zrocks_init(*devname); cunit_zrocks_assert_int("zrocks_init", ret); } static void test_zrocks_exit(void) { zrocks_exit(); } static void test_zrocks_fill_buffer(uint32_t id) { uint32_t byte; uint8_t value = 0x1; for (byte = 0; byte < TEST_BUFFER_SZ; byte += 16) { value += 0x1; memset(&wbuf[id][byte], value, 16); } } static int test_zrocks_check_buffer(uint32_t id, uint32_t off, uint32_t size) { /*printf (" \nMem check:\n"); for (int i = off; i < off + size; i++) { if (i % 16 == 0 && i) printf("\n %d-%d ", i - (i%16), (i - (i%16)) + 16); printf (" %x/%x", wbuf[id][i], rbuf[id][i]); } printf("\n"); */ return memcmp(wbuf[id], rbuf[id], size); } static void test_zrocks_new(void) { uint32_t ids; uint64_t id, phys[TEST_N_BUFFERS]; uint32_t size; uint8_t level; int ret[TEST_N_BUFFERS]; ids = TEST_N_BUFFERS; size = TEST_BUFFER_SZ; level = 0; #pragma omp parallel for for (id = 0; id < ids; id++) { /* Allocate DMA memory */ wbuf[id] = xztl_media_dma_alloc(size, &phys[id]); cunit_zrocks_assert_ptr("xztl_media_dma_alloc", wbuf[id]); if (!wbuf[id]) continue; test_zrocks_fill_buffer(id); ret[id] = zrocks_new(id + 1, wbuf[id], size, level); cunit_zrocks_assert_int("zrocks_new", ret[id]); } } static void test_zrocks_read(void) { uint32_t ids, offset; uint64_t id, phys[TEST_N_BUFFERS]; int ret[TEST_N_BUFFERS]; size_t read_sz, size; ids = TEST_N_BUFFERS; read_sz = 1024 * 64; /* 64 KB */ size = TEST_BUFFER_SZ; for (id = 0; id < ids; id++) { /* Allocate DMA memory */ rbuf[id] = xztl_media_dma_alloc(size, &phys[id]); cunit_zrocks_assert_ptr("xztl_media_dma_alloc", rbuf[id]); if (!rbuf[id]) continue; memset(rbuf[id], 0x0, size); offset = 0; while (offset < size) { ret[id] = zrocks_read_obj(id + 1, offset, rbuf[id] + offset, read_sz); cunit_zrocks_assert_int("zrocks_read_obj", ret[id]); if (ret[id]) printf("Read error: ID %lu, offset %d, status: %x\n", id + 1, offset, ret[id]); offset += read_sz; } ret[id] = test_zrocks_check_buffer(id, 0, TEST_BUFFER_SZ); cunit_zrocks_assert_int("zrocks_read_obj:check", ret[id]); 
if (ret[id]) printf("Corruption: ID %lu, corrupted: %d bytes\n", id + 1, ret[id]); xztl_media_dma_free(rbuf[id]); } } static void test_zrocks_random_read(void) { uint64_t id, phys; uint64_t random_off[4] = {63, 24567, 175678, 267192}; size_t random_sz[4] = {532, 53, 2695, 1561}; // uint64_t random_off[1] = {24567}; // size_t random_sz[1] = {53}; int readi, ret; uint8_t *buf, *woff; id = TEST_RANDOM_ID; buf = xztl_media_dma_alloc(1024 * 512, &phys); cunit_zrocks_assert_ptr("xztl_media_dma_alloc", buf); if (!buf) return; for (readi = 0; readi < 4; readi++) { memset(buf, 0x0, random_sz[readi]); ret = zrocks_read_obj(id, random_off[readi], buf, random_sz[readi]); cunit_zrocks_assert_int("zrocks_read_obj", ret); woff = &wbuf[id - 1][random_off[readi]]; /* Uncomment for a detailed read check (per-byte print) printf (" \nMem check:\n"); for (int i = 0; i < random_sz[readi] + 4096; i++) { if (i % 16 == 0) printf("\n %lu-%lu ", (i+random_off[readi]) - ((i+random_off[readi]) % 16) + random_off[readi] % 16, ((i+random_off[readi]) - ((i+random_off[readi]) % 16)) + 16 + random_off[readi] % 16); printf (" %x/%x", woff[i], buf[i]); } printf("\n"); */ cunit_zrocks_assert_int("zrocks_read_obj:check", memcmp(woff, buf, random_sz[readi])); } xztl_media_dma_free(buf); for (int i = 0; i < TEST_N_BUFFERS; i++) xztl_media_dma_free(wbuf[i]); } int main(int argc, const char **argv) { int failed; if (argc < 2) { printf("Please provide the device path. e.g. liou:/dev/nvme0n2\n"); return -1; } devname = &argv[1]; printf("Device: %s\n", *devname); CU_pSuite pSuite = NULL; if (CUE_SUCCESS != CU_initialize_registry()) return CU_get_error(); pSuite = CU_add_suite("Suite_zrocks", cunit_zrocks_init, cunit_zrocks_exit); if (pSuite == NULL) { CU_cleanup_registry(); return CU_get_error(); } if ((CU_add_test(pSuite, "Initialize ZRocks", test_zrocks_init) == NULL) || (CU_add_test(pSuite, "ZRocks New", test_zrocks_new) == NULL) || (CU_add_test(pSuite, "ZRocks Read", test_zrocks_read) == NULL) || (CU_add_test(pSuite, "ZRocks Random Read", test_zrocks_random_read) == NULL) || (CU_add_test(pSuite, "Close ZRocks", test_zrocks_exit) == NULL)) { CU_cleanup_registry(); return CU_get_error(); } CU_basic_set_mode(CU_BRM_VERBOSE); CU_basic_run_tests(); failed = CU_get_number_of_tests_failed(); CU_cleanup_registry(); return failed; }
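Aside, a sketch (expected_byte/check_span are made-up helpers, not part of the test): test_zrocks_fill_buffer writes a deterministic pattern, where the k-th 16-byte block holds the 8-bit value 0x1 + k + 1, so a read at any offset can be verified without keeping the write buffer around; this is essentially what the random-read test does through its woff pointer.

#include <stdint.h>

static uint8_t expected_byte(uint32_t off)
{
    /* value starts at 0x1 and is incremented once per 16-byte block,
     * wrapping naturally at 8 bits, exactly like the fill loop above */
    return (uint8_t)(0x1 + off / 16 + 1);
}

static int check_span(const uint8_t *buf, uint32_t off, uint32_t len)
{
    for (uint32_t i = 0; i < len; i++)
        if (buf[i] != expected_byte(off + i))
            return -1; /* first mismatch */
    return 0;
}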
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-4, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-2,4),ceild(8*t2-Nz-3,16));t3<=min(floord(4*Nt+Ny-9,16),floord(4*t1+Ny-1,16));t3++) { for (t4=max(max(ceild(t1-14,16),ceild(8*t2-Nz-51,64)),ceild(16*t3-Ny-51,64));t4<=min(min(floord(4*Nt+Nx-9,64),floord(4*t1+Nx-1,64)),floord(16*t3+Nx+3,64));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),4*t3+2),16*t4+14);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(64*t4,4*t5+4); ubv=min(64*t4+63,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
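For readability, a sketch of the untiled computation that the CLooG-generated nest above encodes (same double-buffered A[t%2] field and coef[0..12] layout; radius-4 star, so the interior runs 4..N-5 on each axis; this reference version is not part of the benchmark):

/* One time step of the 25-point axis-symmetric variable-coefficient stencil */
void reference_step(double ****A, double ****coef,
                    int t, int Nz, int Ny, int Nx)
{
  for (int i = 4; i < Nz - 4; i++)
    for (int j = 4; j < Ny - 4; j++)
      for (int k = 4; k < Nx - 4; k++) {
        double s = coef[0][i][j][k] * A[t % 2][i][j][k];
        for (int r = 1; r <= 4; r++) {
          /* coef[3r-2], coef[3r-1], coef[3r] weight the +-r pairs on z, y, x */
          s += coef[3*(r-1) + 1][i][j][k] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
          s += coef[3*(r-1) + 2][i][j][k] * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
          s += coef[3*(r-1) + 3][i][j][k] * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
        }
        A[(t + 1) % 2][i][j][k] = s;
      }
}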
par_csr_matrix.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_ParCSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "../seq_mv/HYPRE_seq_mv.h" #include "../seq_mv/csr_matrix.h" /* In addition to publically accessible interface in HYPRE_mv.h, the implementation in this file uses accessor macros into the sequential matrix structure, and so includes the .h that defines that structure. Should those accessor functions become proper functions at some later date, this will not be necessary. AJC 4/99 */ HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*); /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCreate *--------------------------------------------------------------------------*/ /* If create is called and row_starts and col_starts are NOT null, then it is assumed that they are of length 2 containing the start row of the calling processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParCSRMatrix* hypre_ParCSRMatrixCreate( MPI_Comm comm, HYPRE_BigInt global_num_rows, HYPRE_BigInt global_num_cols, HYPRE_BigInt *row_starts, HYPRE_BigInt *col_starts, HYPRE_Int num_cols_offd, HYPRE_Int num_nonzeros_diag, HYPRE_Int num_nonzeros_offd ) { hypre_ParCSRMatrix *matrix; HYPRE_Int num_procs, my_id; HYPRE_Int local_num_rows, local_num_cols; HYPRE_BigInt first_row_index, first_col_diag; matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); if (!row_starts) { hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, &row_starts); } if (!col_starts) { if (global_num_rows == global_num_cols) { col_starts = row_starts; } else { hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, &col_starts); } } /* row_starts[0] is start of local rows. 
row_starts[1] is start of next processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1]-first_row_index ; first_col_diag = col_starts[0]; local_num_cols = col_starts[1]-first_col_diag; hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rows; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL; hypre_ParCSRMatrixProcOrdering(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1; /* We could make these null instead of leaving the range. If that change is made, then when this create is called from functions like the matrix-matrix multiply, be careful not to generate a new partition. */ hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; matrix->bdiaginv = NULL; matrix->bdiaginv_comm_pkg = NULL; matrix->bdiag_size = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrixSocDiagJ(matrix) = NULL; hypre_ParCSRMatrixSocOffdJ(matrix) = NULL; #endif return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix ) { if (matrix) { HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix); if ( hypre_ParCSRMatrixOwnsData(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if ( hypre_ParCSRMatrixDiagT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if ( hypre_ParCSRMatrixOffdT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixDeviceColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE); } if (hypre_ParCSRMatrixCommPkg(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); } if (hypre_ParCSRMatrixCommPkgT(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } } if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) ) { hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), 
HYPRE_MEMORY_HOST); } if ( hypre_ParCSRMatrixOwnsColStarts(matrix) ) { hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST); } /* RL: this is actually not correct since the memory_location may have been changed after allocation * put them in containers TODO */ hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location); if ( hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix) ) { hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); } if ( hypre_ParCSRMatrixProcOrdering(matrix) ) { hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST); } hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST); if (matrix->bdiaginv_comm_pkg) { hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE); #endif hypre_TFree(matrix, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize_v2( hypre_ParCSRMatrix *matrix, HYPRE_MemoryLocation memory_location ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location); hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix)); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixClone * Creates and returns a new copy S of the argument A * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix *A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *S; S = hypre_ParCSRMatrixCreate( hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)) ); /* !!! 
S does not own Row/Col-Starts */ hypre_ParCSRMatrixSetRowStartsOwner(S, 0); hypre_ParCSRMatrixSetColStartsOwner(S, 0); hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixInitialize_v2(S, memory_location); hypre_ParCSRMatrixCopy(A, S, copy_data); return S; } hypre_ParCSRMatrix* hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, HYPRE_Int copy_data) { return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A)); } HYPRE_Int hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix *A, HYPRE_MemoryLocation memory_location) { if (!A) { return hypre_error_flag; } HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A); if ( hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location) ) { hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A)); hypre_ParCSRMatrixDiag(A) = A_diag; hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixOffd(A) = A_offd; hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location); } else { hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location; } return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros_core( hypre_ParCSRMatrix *matrix, const char* format ) { MPI_Comm comm; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); /* TODO in HYPRE_DEBUG ? 
*/ hypre_CSRMatrixCheckSetNumNonzeros(diag); hypre_CSRMatrixCheckSetNumNonzeros(offd); if (format[0] == 'I') { HYPRE_BigInt total_num_nonzeros; HYPRE_BigInt local_num_nonzeros; local_num_nonzeros = (HYPRE_BigInt) ( hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd) ); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros; } else if (format[0] == 'D') { HYPRE_Real total_num_nonzeros; HYPRE_Real local_num_nonzeros; local_num_nonzeros = (HYPRE_Real) ( hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd) ); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros; } else { hypre_error_in_arg(1); return hypre_error_flag; } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumRownnz *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumRownnz( hypre_ParCSRMatrix *matrix ) { MPI_Comm comm = hypre_ParCSRMatrixComm(matrix); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_Int *rownnz_diag = hypre_CSRMatrixRownnz(diag); HYPRE_Int *rownnz_offd = hypre_CSRMatrixRownnz(offd); HYPRE_Int num_rownnz_diag = hypre_CSRMatrixNumRownnz(diag); HYPRE_Int num_rownnz_offd = hypre_CSRMatrixNumRownnz(offd); HYPRE_BigInt local_num_rownnz; HYPRE_BigInt global_num_rownnz; HYPRE_Int i, j; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } local_num_rownnz = i = j = 0; while (i < num_rownnz_diag && j < num_rownnz_offd) { local_num_rownnz++; if (rownnz_diag[i] < rownnz_offd[j]) { i++; } else { j++; } } local_num_rownnz += (HYPRE_BigInt) ((num_rownnz_diag - i) + (num_rownnz_offd - j)); hypre_MPI_Allreduce(&local_num_rownnz, &global_num_rownnz, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rownnz; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_data ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsData(matrix) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetRowStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_row_starts ) { if (!matrix) { 
hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetColStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_col_starts ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRead *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixRead( MPI_Comm comm, const char *file_name ) { hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; HYPRE_BigInt global_num_rows, global_num_cols; HYPRE_Int num_cols_offd; HYPRE_Int local_num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_BigInt *col_map_offd; FILE *fp; HYPRE_Int equal = 1; HYPRE_BigInt row_s, row_e, col_s, col_e; hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id); hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id); hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_info, "r"); hypre_fscanf(fp, "%b", &global_num_rows); hypre_fscanf(fp, "%b", &global_num_cols); hypre_fscanf(fp, "%d", &num_cols_offd); /* the bgl input file should only contain the EXACT range for local processor */ hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e); row_starts[0] = row_s; row_starts[1] = row_e; col_starts[0] = col_s; col_starts[1] = col_e; col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd; i++) { hypre_fscanf(fp, "%b", &col_map_offd[i]); } fclose(fp); for (i=1; i >= 0; i--) { if (row_starts[i] != col_starts[i]) { equal = 0; break; } } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } diag = hypre_CSRMatrixRead(new_file_d); local_num_rows = hypre_CSRMatrixNumRows(diag); if (num_cols_offd) { offd = hypre_CSRMatrixRead(new_file_o); } else { offd = hypre_CSRMatrixCreate(local_num_rows,0,0); hypre_CSRMatrixInitialize(offd); } matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s; hypre_ParCSRMatrixFirstColDiag(matrix) = col_s; hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1; hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1; hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixDiag(matrix) = diag; hypre_ParCSRMatrixOffd(matrix) = offd; if (num_cols_offd) { hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd; } else 
{ hypre_ParCSRMatrixColMapOffd(matrix) = NULL; } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix, const char *file_name ) { MPI_Comm comm; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt *col_map_offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; FILE *fp; HYPRE_Int num_cols_offd = 0; HYPRE_BigInt row_s, row_e, col_s, col_e; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); if (hypre_ParCSRMatrixOffd(matrix)) num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id); hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id); hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id); hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%b\n", global_num_rows); hypre_fprintf(fp, "%b\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1); for (i=0; i < num_cols_offd; i++) hypre_fprintf(fp, "%b\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename ) { MPI_Comm comm; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_Int num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j; HYPRE_BigInt I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_BigInt ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open 
output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0]+(HYPRE_BigInt)base_i; iupper = row_starts[1]+(HYPRE_BigInt)base_i - 1; jlower = col_starts[0]+(HYPRE_BigInt)base_j; jupper = col_starts[1]+(HYPRE_BigInt)base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt)(i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i+1]; j++) { J = first_col_diag + (HYPRE_BigInt)(diag_j[j] + base_j); if ( diag_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } /* print offd columns */ if ( num_nonzeros_offd ) { for (j = offd_i[i]; j < offd_i[i+1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt)base_j; if ( offd_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J ); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_i_ptr, HYPRE_Int *base_j_ptr, hypre_ParCSRMatrix **matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int)row_starts[0]; base_j = (HYPRE_Int)col_starts[0]; equal = 1; for (i = 0; i <= 
num_procs; i++) { row_starts[i] -= big_base_i; col_starts[i] -= big_base_j; if (row_starts[i] != col_starts[i]) equal = 0; } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); last_col_diag = first_col_diag+(HYPRE_BigInt)num_cols-1; diag_cnt = 0; offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag+num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%b %b %le", &I, &J, &data); i2 = (HYPRE_Int)(I-big_base_i-first_row_index); J -= big_base_j; if (i2 > row_cnt) { diag_i[i2] = diag_cnt; offd_i[i2] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { tmp_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = (HYPRE_Int)(J - first_col_diag); diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_nonzeros_offd; i++) aux_offd_j[i] = (HYPRE_BigInt)offd_j[i]; hypre_BigQsort0(aux_offd_j,0,num_nonzeros_offd-1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i=1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) col_map_offd[++offd_cnt] = aux_offd_j[i]; } for (i=0; i < num_nonzeros_offd; i++) { offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd); } hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); } /* move diagonal element in first position in each row */ for (i=0; i < num_rows; i++) { i_col = diag_i[i]; for (j=i_col; j < diag_i[i+1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix, HYPRE_BigInt *row_start, HYPRE_BigInt *row_end, HYPRE_BigInt *col_start, HYPRE_BigInt *col_end ) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id ); *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. * Only a single row can be accessed via this function at any one time; the * corresponding RestoreRow function must be called, to avoid bleeding memory, * and to be able to look at another row. * Either one of col_ind and values can be left null, and those values will * not be returned. * All indices are returned in 0-based indexing, no matter what is used under * the hood. EXCEPTION: currently this only works if the local CSR matrices * use 0-based indexing. * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ * matrix code, adjusted for our data and software structures. * AJC 4/99. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetRowHost( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { HYPRE_Int my_id; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return(-1); } hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id ); hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; if (row < row_start || row >= row_end) { return(-1); } /* if buffer is not allocated and some information is requested, allocate buffer */ if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values )) { /* allocate enough space to hold information from the longest row. 
*/ HYPRE_Int max = 1,tmp; HYPRE_Int i; HYPRE_Int m = row_end - row_start; for ( i = 0; i < m; i++ ) { tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] + hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i]; if (max < tmp) { max = tmp; } } hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat)); } /* Copy from dual sequential matrices into buffer */ { HYPRE_Complex *vworkA, *vworkB, *v_p; HYPRE_Int i, *cworkA, *cworkB; HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat); HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int)(row-row_start); HYPRE_BigInt *cmap, *idx_p; nzA = hypre_CSRMatrixI(Aa)[lrow+1] - hypre_CSRMatrixI(Aa)[lrow]; cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] ); vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] ); nzB = hypre_CSRMatrixI(Ba)[lrow+1] - hypre_CSRMatrixI(Ba)[lrow]; cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] ); vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] ); nztot = nzA + nzB; cmap = hypre_ParCSRMatrixColMapOffd(mat); if (values || col_ind) { if (nztot) { /* Sort by increasing column numbers, assuming A and B already sorted */ HYPRE_Int imark = -1; if (values) { *values = v_p = hypre_ParCSRMatrixRowvalues(mat); for ( i = 0; i < nzB; i++ ) { if (cmap[cworkB[i]] < cstart) { v_p[i] = vworkB[i]; } else { break; } } imark = i; for ( i = 0; i < nzA; i++ ) { v_p[imark+i] = vworkA[i]; } for ( i = imark; i < nzB; i++ ) { v_p[nzA+i] = vworkB[i]; } } if (col_ind) { *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat); if (imark > -1) { for ( i = 0; i < imark; i++ ) { idx_p[i] = cmap[cworkB[i]]; } } else { for ( i = 0; i < nzB; i++ ) { if (cmap[cworkB[i]] < cstart) { idx_p[i] = cmap[cworkB[i]]; } else { break; } } imark = i; } for ( i = 0; i < nzA; i++ ) { idx_p[imark+i] = cstart + cworkA[i]; } for ( i = imark; i < nzB; i++ ) { idx_p[nzA+i] = cmap[cworkB[i]]; } } } else { if (col_ind) { *col_ind = 0; } if (values) { *values = 0; } } } *size = nztot; } /* End of copy */ return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(mat) ); if (exec == HYPRE_EXEC_DEVICE) { return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values); } else #endif { return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRestoreRow *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { if (!hypre_ParCSRMatrixGetrowactive(matrix)) { hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } hypre_ParCSRMatrixGetrowactive(matrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixToParCSRMatrix: * * Generates a ParCSRMatrix distributed across the processors in comm * from a CSRMatrix on proc 0 . 
* *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_CSRMatrixToParCSRMatrix( MPI_Comm comm, hypre_CSRMatrix *A, HYPRE_BigInt *global_row_starts, HYPRE_BigInt *global_col_starts ) { hypre_ParCSRMatrix *parcsr_A; HYPRE_BigInt *global_data; HYPRE_BigInt global_size; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_Int num_procs, my_id; HYPRE_Int *num_rows_proc; HYPRE_Int *num_nonzeros_proc; HYPRE_BigInt *row_starts = NULL; HYPRE_BigInt *col_starts = NULL; hypre_CSRMatrix *local_A; HYPRE_Complex *A_data; HYPRE_Int *A_i; HYPRE_Int *A_j; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; hypre_MPI_Datatype *csr_matrix_datatypes; HYPRE_Int free_global_row_starts = 0; HYPRE_Int free_global_col_starts = 0; HYPRE_Int total_size; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; HYPRE_Int num_rows; HYPRE_Int num_nonzeros; HYPRE_Int i, ind; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); total_size = 4; if (my_id == 0) { total_size += 2*(num_procs + 1); } global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST); if (my_id == 0) { global_size = 3; if (global_row_starts) { if (global_col_starts) { if (global_col_starts != global_row_starts) { /* contains code for what to expect, if 0: global_row_starts = global_col_starts, only global_row_starts given if 1: only global_row_starts given, global_col_starts = NULL if 2: both global_row_starts and global_col_starts given if 3: only global_col_starts given, global_row_starts = NULL */ global_data[3] = 2; global_size += (HYPRE_BigInt) (2*(num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } for (i = 0; i < (num_procs + 1); i++) { global_data[i+num_procs+5] = global_col_starts[i]; } } else { global_data[3] = 0; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } } } else { global_data[3] = 1; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } } } else { if (global_col_starts) { global_data[3] = 3; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_col_starts[i]; } } } global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A); global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A); global_data[2] = global_size; A_data = hypre_CSRMatrixData(A); A_i = hypre_CSRMatrixI(A); A_j = hypre_CSRMatrixJ(A); } hypre_MPI_Bcast(global_data, 3, HYPRE_MPI_BIG_INT, 0, comm); global_num_rows = global_data[0]; global_num_cols = global_data[1]; global_size = global_data[2]; if (global_size > 3) { HYPRE_Int send_start; if (global_data[3] == 2) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 4 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } else if ((global_data[3] == 0) || 
(global_data[3] == 1)) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); if (global_data[3] == 0) { col_starts = row_starts; } } else { col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } } hypre_TFree(global_data, HYPRE_MEMORY_HOST); // Create ParCSR matrix parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, 0, 0, 0); // Allocate memory for building ParCSR matrix num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { if (!global_row_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts); free_global_row_starts = 1; } if (!global_col_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_col_starts); free_global_col_starts = 1; } for (i = 0; i < num_procs; i++) { num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i+1] - global_row_starts[i]); num_nonzeros_proc[i] = A_i[(HYPRE_Int)global_row_starts[i+1]] - A_i[(HYPRE_Int)global_row_starts[i]]; } //num_nonzeros_proc[num_procs-1] = A_i[(HYPRE_Int)global_num_rows] - A_i[(HYPRE_Int)row_starts[num_procs-1]]; } hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm); hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm); /* RL: this is not correct: (HYPRE_Int) global_num_cols */ local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros); csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_procs-1, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1, HYPRE_MEMORY_HOST); for (i = 1; i < num_procs; i++) { ind = A_i[(HYPRE_Int) global_row_starts[i]]; hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i], num_rows_proc[i], &A_data[ind], &A_i[(HYPRE_Int) global_row_starts[i]], &A_j[ind], &csr_matrix_datatypes[i]); hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm, &requests[i-1]); hypre_MPI_Type_free(&csr_matrix_datatypes[i]); } hypre_CSRMatrixData(local_A) = A_data; hypre_CSRMatrixI(local_A) = A_i; hypre_CSRMatrixJ(local_A) = A_j; hypre_CSRMatrixOwnsData(local_A) = 0; hypre_MPI_Waitall(num_procs-1, requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST); hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST); if (free_global_row_starts) { hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST); } if (free_global_col_starts) { hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST); } } else { hypre_CSRMatrixInitialize(local_A); hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows, hypre_CSRMatrixData(local_A), hypre_CSRMatrixI(local_A), hypre_CSRMatrixJ(local_A), &csr_matrix_datatypes[0]); hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 0, 0, comm, &status0); 
hypre_MPI_Type_free(csr_matrix_datatypes); } first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A); last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A); GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag); /* set pointers back to NULL before destroying */ if (my_id == 0) { hypre_CSRMatrixData(local_A) = NULL; hypre_CSRMatrixI(local_A) = NULL; hypre_CSRMatrixJ(local_A) = NULL; } hypre_CSRMatrixDestroy(local_A); hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST); return parcsr_A; } /* RL: XXX this is not a scalable routine, see `marker' therein */ HYPRE_Int GenerateDiagAndOffd(hypre_CSRMatrix *A, hypre_ParCSRMatrix *matrix, HYPRE_BigInt first_col_diag, HYPRE_BigInt last_col_diag) { HYPRE_Int i, j; HYPRE_Int jo, jd; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *a_data = hypre_CSRMatrixData(A); HYPRE_Int *a_i = hypre_CSRMatrixI(A); /*RL: XXX FIXME if A spans global column space, the following a_j should be bigJ */ HYPRE_Int *a_j = hypre_CSRMatrixJ(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_BigInt *col_map_offd; HYPRE_Complex *diag_data, *offd_data; HYPRE_Int *diag_i, *offd_i; HYPRE_Int *diag_j, *offd_j; HYPRE_Int *marker; HYPRE_Int num_cols_diag, num_cols_offd; HYPRE_Int first_elmt = a_i[0]; HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt; HYPRE_Int counter; num_cols_diag = (HYPRE_Int)(last_col_diag - first_col_diag +1); num_cols_offd = 0; HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); if (num_cols - num_cols_diag) { hypre_CSRMatrixInitialize_v2(diag, 0, memory_location); diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrixInitialize_v2(offd, 0, memory_location); offd_i = hypre_CSRMatrixI(offd); marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST); for (i=0; i < num_cols; i++) { marker[i] = 0; } jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { offd_i[i] = jo; diag_i[i] = jd; for (j = a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++) { if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { if (!marker[a_j[j]]) { marker[a_j[j]] = 1; num_cols_offd++; } jo++; } else { jd++; } } } offd_i[num_rows] = jo; diag_i[num_rows] = jd; hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); counter = 0; for (i = 0; i < num_cols; i++) { if (marker[i]) { col_map_offd[counter] = (HYPRE_BigInt) i; marker[i] = counter; counter++; } } hypre_CSRMatrixNumNonzeros(diag) = jd; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_j = hypre_CSRMatrixJ(diag); hypre_CSRMatrixNumNonzeros(offd) = jo; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_CSRMatrixInitialize(offd); offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); jo = 0; jd = 0; for (i=0; i < num_rows; i++) { for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++) { if (a_j[j] < (HYPRE_Int)first_col_diag || a_j[j] > (HYPRE_Int)last_col_diag) { offd_data[jo] = a_data[j]; offd_j[jo++] = marker[a_j[j]]; } else { diag_data[jd] = a_data[j]; diag_j[jd++] = (HYPRE_Int)(a_j[j]-first_col_diag); } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); } else { hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); for (i=0; i < num_nonzeros; i++) { diag_data[i] = a_data[i]; diag_j[i] = a_j[i]; } offd_i = 
hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST); for (i=0; i < num_rows+1; i++) { diag_i[i] = a_i[i]; offd_i[i] = 0; } hypre_CSRMatrixNumCols(offd) = 0; hypre_CSRMatrixI(offd) = offd_i; } return hypre_error_flag; } hypre_CSRMatrix * hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix) { hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); hypre_CSRMatrix *matrix; HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int *matrix_i; HYPRE_BigInt *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int num_nonzeros, i, j; HYPRE_Int count; HYPRE_Int size, rest, num_threads, ii; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix); num_nonzeros = diag_i[num_rows] + offd_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = memory_location; hypre_CSRMatrixBigInitialize(matrix); matrix_i = hypre_CSRMatrixI(matrix); matrix_j = hypre_CSRMatrixBigJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); num_threads = hypre_NumThreads(); size = num_rows/num_threads; rest = num_rows - size*num_threads; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { HYPRE_Int ns, ne; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } count = diag_i[ns]+offd_i[ns];; for (i = ns; i < ne; i++) { matrix_i[i] = count; for (j=diag_i[i]; j < diag_i[i+1]; j++) { matrix_data[count] = diag_data[j]; matrix_j[count++] = (HYPRE_BigInt)diag_j[j]+first_col_diag; } for (j=offd_i[i]; j < offd_i[i+1]; j++) { matrix_data[count] = offd_data[j]; matrix_j[count++] = col_map_offd[offd_j[j]]; } } } /* end parallel region */ matrix_i[num_rows] = num_nonzeros; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixToCSRMatrixAll: * generates a CSRMatrix from a ParCSRMatrix on all processors that have * parts of the ParCSRMatrix * Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1 *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix); hypre_CSRMatrix *matrix; hypre_CSRMatrix *local_matrix; HYPRE_Int num_rows = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(par_matrix); HYPRE_Int num_cols = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int *local_matrix_i; HYPRE_Int *local_matrix_j; HYPRE_Complex *local_matrix_data; HYPRE_Int i, j; HYPRE_Int local_num_rows; HYPRE_Int local_num_nonzeros; HYPRE_Int num_nonzeros; HYPRE_Int num_data; HYPRE_Int num_requests; HYPRE_Int vec_len, offset; HYPRE_Int start_index; HYPRE_Int proc_id; HYPRE_Int num_procs, my_id; HYPRE_Int num_types; HYPRE_Int *used_procs; hypre_MPI_Request *requests; 
hypre_MPI_Status *status; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf=NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_num_rows = (HYPRE_Int)(hypre_ParCSRMatrixLastRowIndex(par_matrix) - hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1); local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */ hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */ local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); /* determine procs that have vector data and store their ids in used_procs */ /* we need to do an exchange data for this. If I own row then I will contact processor 0 with the endpoint of my local range */ if (local_num_rows > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = (HYPRE_Int)hypre_ParCSRMatrixLastRowIndex(par_matrix); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /*build the response object*/ /*send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToCSRMatrix; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void**) &response_recv_buf, &response_recv_buf_starts); /* now processor 0 should have a list of ranges for processors that have rows - these are in send_proc_obj - it needs to create the new list of processors and also an array of vec starts - and send to those who own row*/ if (my_id) { if (local_num_rows) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); for (i=1; i<= num_types; i++) { used_procs[i-1] = send_info[i]; } for (i=num_types+1; i< count; i++) { new_vec_starts[i-num_types-1] = send_info[i] ; } } else /* clean up and exit */ { hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); 
if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); return NULL; } } else /* my_id ==0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i=0; i< num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i+1] = send_proc_obj.elements[i]+1; } hypre_qsort0(used_procs, 0, num_types-1); hypre_qsort0(new_vec_starts, 0, num_types); /*now we need to put into an array to send */ count = 2*num_types+2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i=1; i<= num_types; i++) { send_info[i] = (HYPRE_BigInt)used_procs[i-1]; } for (i=num_types+1; i< count; i++) { send_info[i] = new_vec_starts[i-num_types-1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first*/ start = 0; if (num_types && used_procs[0] == 0) { start = 1; } for (i=start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]); } hypre_MPI_Waitall(num_types-start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_num_rows) { if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); return NULL; } /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */ /* this matrix should be rather small */ matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST); num_requests = 4*num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* exchange contents of local_matrix_i - here we are sending to ourself also*/ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]); hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ /* global numbering?*/ offset = matrix_i[new_vec_starts[1]]; for (i=1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++) matrix_i[j+1] += offset; offset = matrix_i[new_vec_starts[i+1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, 
num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* generate datatypes for further data exchange and exchange remaining data, i.e. column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[(HYPRE_Int)new_vec_starts[i]]; num_data = matrix_i[(HYPRE_Int)new_vec_starts[i+1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i=0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCopy, * copies B to A, * if copy_data = 0, only the structure of A is copied to B * the routine does not check whether the dimensions of A and B are compatible *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Int copy_data ) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; HYPRE_BigInt *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int num_cols_offd_A; HYPRE_Int num_cols_offd_B; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(1); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_assert(num_cols_offd_A == num_cols_offd_B); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); /* should not happen if B has been initialized */ if (num_cols_offd_B && col_map_offd_B == NULL) { col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int 
*response_message_size ) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2; hypre_MPI_Comm_rank(comm, &myid ); /*check to see if we need to allocate more space in send_proc_obj for ids*/ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length +=10; /*add space for 10 more processors*/ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /*initialize*/ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/ /*send proc*/ send_proc_obj->id[count] = contact_proc; /*do we need more storage for the elements?*/ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /*populate send_proc_obj*/ for (i=0; i< contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count+1] = index; send_proc_obj->length++; /*output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. 
* A and B must have the same communicator, numbers and distributions of rows * and columns (they can differ in which row-column pairs are nonzero, thus * in which columns are in a offd block) *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B ) { hypre_ParCSRMatrix * C; HYPRE_BigInt * col_map_offd_C = NULL; HYPRE_Int num_procs, my_id, p; MPI_Comm comm = hypre_ParCSRMatrixComm( A ); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 , HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A ); hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A ); hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A ); hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A ); hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B ) == hypre_ParCSRMatrixFirstRowIndex( A ) ); hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A ); hypre_ParCSRMatrixOwnsRowStarts( C ) = 0; hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A ); hypre_ParCSRMatrixOwnsColStarts( C ) = 0; for ( p=0; p<=num_procs; ++p ) hypre_assert( hypre_ParCSRMatrixColStarts(A) == hypre_ParCSRMatrixColStarts(B) ); hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A ); hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A ); hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A ); hypre_ParCSRMatrixDiag( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B), 0, 0, 0 ); hypre_ParCSRMatrixOffd( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C ); hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C; hypre_ParCSRMatrixCommPkg( C ) = NULL; hypre_ParCSRMatrixCommPkgT( C ) = NULL; hypre_ParCSRMatrixOwnsData( C ) = 1; /* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. I suspect, but don't know, that other parts of hypre do not assume that the correct values have been set. 
hypre_ParCSRMatrixSetNumNonzeros( C ); hypre_ParCSRMatrixSetDNumNonzeros( C );*/ hypre_ParCSRMatrixNumNonzeros( C ) = 0; hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0; hypre_ParCSRMatrixRowindices( C ) = NULL; hypre_ParCSRMatrixRowvalues( C ) = NULL; hypre_ParCSRMatrixGetrowactive( C ) = 0; return C; } /* drop the entries that are not on the diagonal and smaller than * its row norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int type) { HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i; MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *marker_offd = NULL; HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int my_id, num_procs; /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (tol <= 0.0) { return hypre_error_flag; } marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0; for (i = 0; i < nrow_local; i++) { /* compute row norm */ HYPRE_Real row_nrm = 0.0; for (j = A_diag_i_i; j < A_diag_i[i+1]; j++) { HYPRE_Complex v = A_diag_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v*v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i+1]; j++) { HYPRE_Complex v = A_offd_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v*v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } } if (type == 2) { row_nrm = sqrt(row_nrm); } /* drop small entries based on tol and row norm */ for (j = A_diag_i_i; j < A_diag_i[i+1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (i == col || fabs(val) >= tol * row_nrm) { A_diag_j[nnz_diag] = col; A_diag_a[nnz_diag] = val; nnz_diag ++; } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i+1]; j++) { HYPRE_Int col = A_offd_j[j]; HYPRE_Complex val = A_offd_a[j]; /* in normal cases: diagonal entry should not * appear in A_offd (but this can still be possible) */ if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm) { if (0 == marker_offd[col]) { marker_offd[col] = 1; } A_offd_j[nnz_offd] = col; A_offd_a[nnz_offd] = val; nnz_offd ++; } } } A_diag_i_i = A_diag_i[i+1]; A_offd_i_i = A_offd_i[i+1]; A_diag_i[i+1] = nnz_diag; A_offd_i[i+1] = nnz_offd; } hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag; hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd; hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (marker_offd[i]) { col_map_offd_A[k] = col_map_offd_A[i]; marker_offd[i] = k++; } } /* num_cols_A_offd = k; */ hypre_CSRMatrixNumCols(A_offd) = k; for (i = 0; i < nnz_offd; i++) { A_offd_j[i] = marker_offd[A_offd_j[i]]; } if ( hypre_ParCSRMatrixCommPkg(A) 
) { hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) ); } hypre_MatvecCommPkgCreate(A); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Perform dual truncation of ParCSR matrix. * This code is adapted from original BoomerAMGInterpTruncate() * A: parCSR matrix to be modified * tol: relative tolerance or truncation factor for dropping small terms * max_row_elmts: maximum number of (largest) nonzero elements to keep. * rescale: Boolean on whether or not to scale resulting matrix. Scaling for * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero values after dropping), * this way, the application of the truncated matrix on a constant vector is the same as that of * the original matrix. * nrm_type: type of norm used for dropping with tol. * -- 0 = infinity-norm * -- 1 = 1-norm * -- 2 = 2-norm */ HYPRE_Int hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int max_row_elmts, HYPRE_Int rescale, HYPRE_Int nrm_type) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime(); #endif hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j_new; HYPRE_Real *A_diag_data_new; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j_new; HYPRE_Real *A_offd_data_new; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int i, j, start_j; HYPRE_Int ierr = 0; HYPRE_Int next_open; HYPRE_Int now_checking; HYPRE_Int num_lost; HYPRE_Int num_lost_global=0; HYPRE_Int next_open_offd; HYPRE_Int now_checking_offd; HYPRE_Int num_lost_offd; HYPRE_Int num_lost_global_offd; HYPRE_Int A_diag_size; HYPRE_Int A_offd_size; HYPRE_Int num_elmts; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Real row_nrm; HYPRE_Real drop_coeff; HYPRE_Real row_sum; HYPRE_Real scale; HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag); HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd); /* Threading variables. Entry i of num_lost_(offd_)per_thread holds the * number of dropped entries over thread i's row range. Cum_lost_per_thread * will temporarily store the cumulative number of dropped entries up to * each thread. 
*/ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int * cum_lost_per_thread; HYPRE_Int * num_lost_per_thread; HYPRE_Int * num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt) #endif { my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /* Compute each thread's range of rows to truncate and compress. Note, * that i, j and data are all compressed as entries are dropped, but * that the compression only occurs locally over each thread's row * range. A_diag_i is only made globally consistent at the end of this * routine. During the dropping phases, A_diag_i[stop] will point to * the start of the next thread's row range. */ /* my row range */ start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } /* * Truncate based on truncation tolerance */ if (tol > 0) { num_lost = 0; num_lost_offd = 0; next_open = A_diag_i[start]; now_checking = A_diag_i[start]; next_open_offd = A_offd_i[start];; now_checking_offd = A_offd_i[start];; for (i = start; i < stop; i++) { row_nrm = 0; /* compute norm for dropping small terms */ if (nrm_type == 0) { /* infty-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { row_nrm = (row_nrm < fabs(A_diag_data[j])) ? fabs(A_diag_data[j]) : row_nrm; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { row_nrm = (row_nrm < fabs(A_offd_data[j])) ? 
fabs(A_offd_data[j]) : row_nrm; } } if (nrm_type == 1) { /* 1-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { row_nrm += fabs(A_diag_data[j]); } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { row_nrm += fabs(A_offd_data[j]); } } if (nrm_type == 2) { /* 2-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { HYPRE_Complex v = A_diag_data[j]; row_nrm += v*v; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { HYPRE_Complex v = A_offd_data[j]; row_nrm += v*v; } row_nrm = sqrt(row_nrm); } drop_coeff = tol * row_nrm; start_j = A_diag_i[i]; if (num_lost) { A_diag_i[i] -= num_lost; } row_sum = 0; scale = 0; for (j = start_j; j < A_diag_i[i+1]; j++) { row_sum += A_diag_data[now_checking]; if (fabs(A_diag_data[now_checking]) < drop_coeff) { num_lost++; now_checking++; } else { scale += A_diag_data[now_checking]; A_diag_data[next_open] = A_diag_data[now_checking]; A_diag_j[next_open] = A_diag_j[now_checking]; now_checking++; next_open++; } } start_j = A_offd_i[i]; if (num_lost_offd) { A_offd_i[i] -= num_lost_offd; } for (j = start_j; j < A_offd_i[i+1]; j++) { row_sum += A_offd_data[now_checking_offd]; if (fabs(A_offd_data[now_checking_offd]) < drop_coeff) { num_lost_offd++; now_checking_offd++; } else { scale += A_offd_data[now_checking_offd]; A_offd_data[next_open_offd] = A_offd_data[now_checking_offd]; A_offd_j[next_open_offd] = A_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* scale row of A */ if (rescale && scale != 0.) { if (scale != row_sum) { scale = row_sum/scale; for (j = A_diag_i[i]; j < (A_diag_i[i+1]-num_lost); j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < (A_offd_i[i+1]-num_lost_offd); j++) { A_offd_data[j] *= scale; } } } } /* end loop for (i = 0; i < n_fine; i++) */ /* store number of dropped elements and number of threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /* * Truncate based on capping the nnz per row * */ if (max_row_elmts > 0) { HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd; HYPRE_Int *A_aux_j; HYPRE_Real *A_aux_data; /* find maximum row length locally over this row range */ A_mxnum = 0; for (i=start; i<stop; i++) { /* Note A_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = A_diag_i[i+1]; last_index_offd = A_offd_i[i+1]; if (i == stop-1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i]; if (cnt1 > A_mxnum) { A_mxnum = cnt1; } } /* Some rows exceed max_row_elmts, and require truncation. Essentially, * each thread truncates and compresses its range of rows locally. 
*/ if (A_mxnum > max_row_elmts) { num_lost = 0; num_lost_offd = 0; /* two temporary arrays to hold row i for temporary operations */ A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST); A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST); cnt_diag = A_diag_i[start]; cnt_offd = A_offd_i[start]; for (i = start; i < stop; i++) { /* Note A_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = A_diag_i[i+1]; last_index_offd = A_offd_i[i+1]; if (i == stop-1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i]; if (max_row_elmts < num_elmts) { /* copy both diagonal and off-diag parts of row i to _aux_ arrays */ cnt = 0; for (j = A_diag_i[i]; j < last_index; j++) { A_aux_j[cnt] = A_diag_j[j]; A_aux_data[cnt++] = A_diag_data[j]; row_sum += A_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = A_offd_i[i]; j < last_index_offd; j++) { A_aux_j[cnt] = A_offd_j[j]+num_cols; A_aux_data[cnt++] = A_offd_data[j]; row_sum += A_offd_data[j]; } num_lost_offd += cnt-cnt1; /* sort data */ hypre_qsort2_abs(A_aux_j,A_aux_data,0,cnt-1); scale = 0; if (i > start) { A_diag_i[i] = cnt_diag; A_offd_i[i] = cnt_offd; } for (j = 0; j < max_row_elmts; j++) { scale += A_aux_data[j]; if (A_aux_j[j] < num_cols) { A_diag_j[cnt_diag] = A_aux_j[j]; A_diag_data[cnt_diag++] = A_aux_data[j]; } else { A_offd_j[cnt_offd] = A_aux_j[j]-num_cols; A_offd_data[cnt_offd++] = A_aux_data[j]; } } num_lost -= cnt_diag-A_diag_i[i]; num_lost_offd -= cnt_offd-A_offd_i[i]; /* scale row of A */ if (rescale && (scale != 0.)) { if (scale != row_sum) { scale = row_sum/scale; for (j = A_diag_i[i]; j < cnt_diag; j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < cnt_offd; j++) { A_offd_data[j] *= scale; } } } } /* end if (max_row_elmts < num_elmts) */ else { /* nothing dropped from this row, but still have to shift entries back * by the number dropped so far */ if (A_diag_i[i] != cnt_diag) { start_j = A_diag_i[i]; A_diag_i[i] = cnt_diag; for (j = start_j; j < last_index; j++) { A_diag_j[cnt_diag] = A_diag_j[j]; A_diag_data[cnt_diag++] = A_diag_data[j]; } } else { cnt_diag += last_index-A_diag_i[i]; } if (A_offd_i[i] != cnt_offd) { start_j = A_offd_i[i]; A_offd_i[i] = cnt_offd; for (j = start_j; j < last_index_offd; j++) { A_offd_j[cnt_offd] = A_offd_j[j]; A_offd_data[cnt_offd++] = A_offd_data[j]; } } else { cnt_offd += last_index_offd-A_offd_i[i]; } } } /* end for (i = 0; i < n_fine; i++) */ num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST); hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST); } /* end if (A_mxnum > max_row_elmts) */ } /* end if (max_row_elmts > 0) */ /* Sum up num_lost_global */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for (i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* Each thread has it's own locally compressed CSR matrix from rows start * to stop. 
Now, we have to copy each thread's chunk into the new * process-wide CSR data structures * * First, we compute the new process-wide number of nonzeros (i.e., * A_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. */ if (my_thread_num == 0) { A_diag_size = A_diag_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_diag_size -= num_lost_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag); A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if (my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num-1]; } /* copy the j and data arrays over */ for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { A_diag_j_new[next_open] = A_diag_j[i]; A_diag_data_new[next_open] = A_diag_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update A_diag_i with number of dropped entries by all lower ranked * threads */ if (my_thread_num > 0) { for (i=start; i<stop; i++) { A_diag_i[i] -= cum_lost_per_thread[my_thread_num-1]; } } if (my_thread_num == 0) { /* Set last entry */ A_diag_i[n_fine] = A_diag_size ; hypre_TFree(A_diag_j, memory_location_diag); hypre_TFree(A_diag_data, memory_location_diag); hypre_CSRMatrixJ(A_diag) = A_diag_j_new; hypre_CSRMatrixData(A_diag) = A_diag_data_new; hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size; } } /* * Synchronize and create new offd data structures */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if (my_thread_num == 0) { A_offd_size = A_offd_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_offd_size -= num_lost_offd_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd); A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if (my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num-1]; } /* copy the j and data arrays over */ for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { A_offd_j_new[next_open] = A_offd_j[i]; A_offd_data_new[next_open] = A_offd_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update A_offd_i with number of dropped entries by all lower ranked * threads */ if (my_thread_num > 0) { for (i=start; i<stop; i++) { A_offd_i[i] -= cum_lost_per_thread[my_thread_num-1]; } } if (my_thread_num == 0) { /* Set last entry */ A_offd_i[n_fine] = A_offd_size ; hypre_TFree(A_offd_j, memory_location_offd); hypre_TFree(A_offd_data, memory_location_offd); hypre_CSRMatrixJ(A_offd) = 
A_offd_j_new; hypre_CSRMatrixData(A_offd) = A_offd_data_new; hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime(); #endif return ierr; }
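Throughout the file above, a ParCSRMatrix keeps its local rows as two CSR blocks: diag for the columns this process owns and offd for all other columns, with col_map_offd translating compressed offd-local indices back to global column numbers. The sketch below is a minimal, self-contained illustration of that split for a single row (made-up values, not hypre's API); hypre's GenerateDiagAndOffd does the same work with a marker array over all columns, which is why its col_map_offd comes out sorted, whereas the linear search here records columns in first-seen order.

#include <stdio.h>

#define NNZ 6

int main(void)
{
   int    a_j[NNZ] = { 0, 3, 4, 8, 5, 9 };        /* global column indices */
   double a_x[NNZ] = { 1., 2., 3., 4., 5., 6. };
   int    first_col_diag = 3, last_col_diag = 5;  /* this rank owns 3..5   */

   int diag_j[NNZ]; double diag_x[NNZ]; int nnz_diag = 0;
   int offd_j[NNZ]; double offd_x[NNZ]; int nnz_offd = 0;
   int col_map_offd[NNZ]; int ncol_offd = 0;      /* offd-local -> global  */

   for (int k = 0; k < NNZ; k++)
   {
      if (a_j[k] >= first_col_diag && a_j[k] <= last_col_diag)
      {
         diag_j[nnz_diag]   = a_j[k] - first_col_diag; /* shift to local 0-based */
         diag_x[nnz_diag++] = a_x[k];
      }
      else
      {
         /* compress: reuse an existing offd column or append a new one */
         int c;
         for (c = 0; c < ncol_offd && col_map_offd[c] != a_j[k]; c++) ;
         if (c == ncol_offd) { col_map_offd[ncol_offd++] = a_j[k]; }
         offd_j[nnz_offd]   = c;
         offd_x[nnz_offd++] = a_x[k];
      }
   }

   printf("diag:");
   for (int k = 0; k < nnz_diag; k++) printf(" (%d,%.0f)", diag_j[k], diag_x[k]);
   printf("\noffd:");
   for (int k = 0; k < nnz_offd; k++)
      printf(" (%d->%d,%.0f)", offd_j[k], col_map_offd[offd_j[k]], offd_x[k]);
   printf("\n");
   return 0;
}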
dnnl_quantize_v2-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file dnnl_quantize_v2-inl.h * \brief */ #ifndef MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_ #define MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_ #if MXNET_USE_ONEDNN == 1 #include <algorithm> #include <string> #include <vector> #include "../../nn/dnnl/dnnl_base-inl.h" #include "../quantize_v2-inl.h" namespace mxnet { namespace op { class SgDNNLQuantizeOperator { public: explicit SgDNNLQuantizeOperator(const nnvm::NodeAttrs& attrs) : param_(nnvm::get<QuantizeV2Param>(attrs.parsed)) {} void Forward(const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs); private: bool initalized_{false}; QuantizeV2Param param_; float cached_data_min_{0.f}; float cached_data_max_{0.f}; dnnl::memory::desc o_desc_; dnnl_args_map_t args_; std::shared_ptr<dnnl::reorder> fwd_pd_; }; void SgDNNLQuantizeOperator::Forward(const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { float quantized_range = 0.0; NDArray in_buffer = inputs[0]; float data_min = mshadow::red::limits::MaxValue<float>(); float data_max = mshadow::red::limits::MinValue<float>(); // Pass through quantized data if (inputs[0].dtype() == mshadow::kUint8 || inputs[0].dtype() == mshadow::kInt8) { if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) { *outputs[1].data().dptr<float>() = param_.min_calib_range.value(); *outputs[2].data().dptr<float>() = param_.max_calib_range.value(); } else { if (inputs[0].dtype() == mshadow::kUint8) { *outputs[1].data().dptr<float>() = 0; *outputs[2].data().dptr<float>() = kUint8Range; } else { *outputs[1].data().dptr<float>() = -kInt8Range; *outputs[2].data().dptr<float>() = kInt8Range; } } if (req[0] != kWriteInplace) { const_cast<NDArray&>(outputs[0]).CopyFrom(*inputs[0].GetDNNLData()); DNNLStream::Get()->Submit(); } } else { if (in_buffer.IsView() && in_buffer.IsDNNLData()) in_buffer = inputs[0].Reorder2Default(); auto i_mem = in_buffer.GetDNNLData(); if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) { data_min = param_.min_calib_range.value(); data_max = param_.max_calib_range.value(); } else { // no calib info in_buffer = inputs[0].Reorder2Default(); auto in_ptr = in_buffer.data().dptr<float>(); auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); std::vector<float> data_maxs(nthreads, data_max); std::vector<float> data_mins(nthreads, data_min); #pragma omp parallel for num_threads(nthreads) for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) { int tid = omp_get_thread_num(); if (in_ptr[i] > data_maxs[tid]) data_maxs[tid] = in_ptr[i]; if (in_ptr[i] < data_mins[tid]) 
data_mins[tid] = in_ptr[i]; } for (index_t i = 0; i < nthreads; i++) { if (data_maxs[i] > data_max) data_max = data_maxs[i]; if (data_mins[i] < data_min) data_min = data_mins[i]; } if (initalized_ && (cached_data_min_ != data_min || cached_data_max_ != data_max)) initalized_ = false; } // Write output min/max auto out_type = GetQuantizeOutputType(param_); if (out_type == mshadow::kUint8) { quantized_range = kUint8Range; *outputs[1].data().dptr<float>() = data_min; *outputs[2].data().dptr<float>() = data_max; } else if (out_type == mshadow::kInt8) { float real_range = MaxAbs(data_min, data_max); quantized_range = kInt8Range; *outputs[1].data().dptr<float>() = -real_range; *outputs[2].data().dptr<float>() = real_range; } else { LOG(FATAL) << "dnnl quantize op only supports int8 and uint8 as output type"; } if (!initalized_) { cached_data_min_ = data_min; cached_data_max_ = data_max; float real_range = MaxAbs(data_min, data_max); float scale = quantized_range / real_range; dnnl::primitive_attr attr; const int mask = 0; std::vector<float> scales = {scale}; attr.set_output_scales(mask, scales); dnnl::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine(); auto i_desc = i_mem->get_desc(); size_t i_ndim = in_buffer.shape().ndim(); if (i_ndim == 4) { dnnl::memory::format_tag o_fmt = dnnl::memory::format_tag::nhwc; dnnl::memory::dims o_dims(i_desc.data.dims, i_desc.data.dims + i_desc.data.ndims); o_desc_ = dnnl::memory::desc(o_dims, get_dnnl_type(out_type), o_fmt); } else { o_desc_ = i_desc; o_desc_.data.data_type = get_dnnl_type_t(out_type); } auto reorder_pd = dnnl::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc_, attr); fwd_pd_ = std::make_shared<dnnl::reorder>(reorder_pd); initalized_ = true; } auto o_mem = CreateDNNLMem(outputs[0], o_desc_, req[0]); args_[DNNL_ARG_FROM] = *i_mem; args_[DNNL_ARG_TO] = *o_mem.second; DNNLStream::Get()->RegisterPrimArgs(*fwd_pd_, args_); CommitOutput(outputs[0], o_mem); DNNLStream::Get()->Submit(); } } static void SgDNNLQuantizeForward(const OpStatePtr& state_ptr, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { SgDNNLQuantizeOperator& op = state_ptr.get_state<SgDNNLQuantizeOperator>(); op.Forward(ctx, inputs, req, outputs); } } // namespace op } // namespace mxnet #endif // MXNET_USE_ONEDNN == 1 #endif // MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_
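When no calibration range is supplied, the operator above scans the input for data_min/data_max in parallel and then configures a oneDNN reorder whose output scale is quantized_range / real_range, with real_range = MaxAbs(data_min, data_max) for int8 output. The sketch below mirrors that arithmetic in plain C (a sketch only: the real conversion happens inside the reorder primitive, the rounding mode is assumed round-to-nearest, and kInt8Range = 127 follows the MXNet convention):

#include <math.h>
#include <stdio.h>

#define KINT8_RANGE 127.0f

static signed char quantize_int8(float x, float scale)
{
   float q = roundf(x * scale);
   if (q >  KINT8_RANGE) q =  KINT8_RANGE;   /* saturate to int8 range */
   if (q < -KINT8_RANGE) q = -KINT8_RANGE;
   return (signed char) q;
}

int main(void)
{
   float data[4]  = { -0.4f, 0.1f, 0.25f, 0.5f };
   float data_min = -0.4f, data_max = 0.5f;     /* from the min/max pass */
   float real_range = fmaxf(fabsf(data_min), fabsf(data_max));
   float scale = KINT8_RANGE / real_range;      /* 127 / 0.5 = 254 */

   for (int i = 0; i < 4; i++)
      printf("% .2f -> %d\n", data[i], quantize_int8(data[i], scale));
   /* the op also reports the calibrated range: [-real_range, real_range] */
   return 0;
}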
interTaskDummy.c
int main (int argc, char * argv[])
{
    int x = 0;
    #pragma omp parallel
    {
        if (x > 10)
        {
        }
    }
}
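interTaskDummy.c only reads the shared variable x inside the parallel region, so it is race-free, which is presumably the point of the fixture. For contrast, a sketch (an assumption about what such dummies are used to probe, not part of the original file) of the same pattern with a write, which must be synchronized:

#include <omp.h>
#include <stdio.h>

int main(void)
{
   int x = 0;
   #pragma omp parallel
   {
      if (x > 10) { }   /* concurrent reads of shared x: no race        */
      #pragma omp atomic
      x++;              /* a write needs atomic/critical to stay safe   */
   }
   printf("x = %d\n", x);
   return 0;
}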
region_layer.c
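The file below implements the YOLOv2 region layer; its core is get_region_box, which decodes a raw network output (tx, ty, tw, th) at grid cell (i, j) into a normalized box: the center is the cell offset plus a sigmoid, and the size is an exponential scaled by the anchor prior. A minimal sketch of that decode (DOABS path, as compiled below; the anchor and output values are illustrative only):

#include <math.h>
#include <stdio.h>

static float logistic(float v) { return 1.f / (1.f + expf(-v)); }

int main(void)
{
   float tx = 0.2f, ty = -0.1f, tw = 0.3f, th = 0.0f; /* raw net outputs */
   float bias_w = 1.08f, bias_h = 1.19f;              /* anchor prior    */
   int i = 5, j = 7, w = 13, h = 13;                  /* cell, grid size */

   float bx = (i + logistic(tx)) / w;   /* center, normalized to [0,1]   */
   float by = (j + logistic(ty)) / h;
   float bw = expf(tw) * bias_w / w;    /* size, scaled by the prior     */
   float bh = expf(th) * bias_h / h;

   printf("box: x=%.3f y=%.3f w=%.3f h=%.3f\n", bx, by, bw, bh);
   return 0;
}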
#include "region_layer.h" #include "activations.h" #include "blas.h" #include "box.h" #include "cuda.h" #include "utils.h" #include <stdio.h> #include <assert.h> #include <string.h> #include <stdlib.h> #define DOABS 1 region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes) { region_layer l = { (LAYER_TYPE)0 }; l.type = REGION; l.n = n; l.batch = batch; l.h = h; l.w = w; l.classes = classes; l.coords = coords; l.cost = (float*)calloc(1, sizeof(float)); l.biases = (float*)calloc(n * 2, sizeof(float)); l.bias_updates = (float*)calloc(n * 2, sizeof(float)); l.outputs = h*w*n*(classes + coords + 1); l.inputs = l.outputs; l.max_boxes = max_boxes; l.truths = max_boxes*(5); l.delta = (float*)calloc(batch * l.outputs, sizeof(float)); l.output = (float*)calloc(batch * l.outputs, sizeof(float)); int i; for(i = 0; i < n*2; ++i){ l.biases[i] = .5; } l.forward = forward_region_layer; l.backward = backward_region_layer; #ifdef GPU l.forward_gpu = forward_region_layer_gpu; l.backward_gpu = backward_region_layer_gpu; l.output_gpu = cuda_make_array(l.output, batch*l.outputs); l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs); #endif fprintf(stderr, "detection\n"); srand(0); return l; } void resize_region_layer(layer *l, int w, int h) { int old_w = l->w; int old_h = l->h; l->w = w; l->h = h; l->outputs = h*w*l->n*(l->classes + l->coords + 1); l->inputs = l->outputs; l->output = (float*)realloc(l->output, l->batch * l->outputs * sizeof(float)); l->delta = (float*)realloc(l->delta, l->batch * l->outputs * sizeof(float)); #ifdef GPU if (old_w < w || old_h < h) { cuda_free(l->delta_gpu); cuda_free(l->output_gpu); l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs); l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs); } #endif } box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h) { box b; b.x = (i + logistic_activate(x[index + 0])) / w; b.y = (j + logistic_activate(x[index + 1])) / h; b.w = exp(x[index + 2]) * biases[2*n]; b.h = exp(x[index + 3]) * biases[2*n+1]; if(DOABS){ b.w = exp(x[index + 2]) * biases[2*n] / w; b.h = exp(x[index + 3]) * biases[2*n+1] / h; } return b; } float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale) { box pred = get_region_box(x, biases, n, index, i, j, w, h); float iou = box_iou(pred, truth); float tx = (truth.x*w - i); float ty = (truth.y*h - j); float tw = log(truth.w / biases[2*n]); float th = log(truth.h / biases[2*n + 1]); if(DOABS){ tw = log(truth.w*w / biases[2*n]); th = log(truth.h*h / biases[2*n + 1]); } delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0])); delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1])); delta[index + 2] = scale * (tw - x[index + 2]); delta[index + 3] = scale * (th - x[index + 3]); return iou; } void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss) { int i, n; if(hier){ float pred = 1; while(class_id >= 0){ pred *= output[index + class_id]; int g = hier->group[class_id]; int offset = hier->group_offset[g]; for(i = 0; i < hier->group_size[g]; ++i){ delta[index + offset + i] = scale * (0 - output[index + offset + i]); } delta[index + class_id] = scale * (1 - output[index + class_id]); class_id = hier->parent[class_id]; } *avg_cat += pred; } else 
{ // Focal loss if (focal_loss) { // Focal Loss float alpha = 0.5; // 0.25 or 0.5 //float gamma = 2; // hardcoded in many places of the grad-formula int ti = index + class_id; float pt = output[ti] + 0.000000000000001F; // http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1); // http://blog.csdn.net/linmingan/article/details/77885832 //float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1); // https://github.com/unsky/focal-loss for (n = 0; n < classes; ++n) { delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]); delta[index + n] *= alpha*grad; if (n == class_id) *avg_cat += output[index + n]; } } else { // default for (n = 0; n < classes; ++n) { delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]); if (n == class_id) *avg_cat += output[index + n]; } } } } float logit(float x) { return log(x/(1.-x)); } float tisnan(float x) { return (x != x); } static int entry_index(layer l, int batch, int location, int entry) { int n = location / (l.w*l.h); int loc = location % (l.w*l.h); return batch*l.outputs + n*l.w*l.h*(l.coords + l.classes + 1) + entry*l.w*l.h + loc; } void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output); void forward_region_layer(const region_layer l, network_state state) { int i,j,b,t,n; int size = l.coords + l.classes + 1; memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float)); #ifndef GPU flatten(l.output, l.w*l.h, size*l.n, l.batch, 1); #endif for (b = 0; b < l.batch; ++b){ for(i = 0; i < l.h*l.w*l.n; ++i){ int index = size*i + b*l.outputs; l.output[index + 4] = logistic_activate(l.output[index + 4]); } } #ifndef GPU if (l.softmax_tree){ for (b = 0; b < l.batch; ++b){ for(i = 0; i < l.h*l.w*l.n; ++i){ int index = size*i + b*l.outputs; softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5); } } } else if (l.softmax){ for (b = 0; b < l.batch; ++b){ for(i = 0; i < l.h*l.w*l.n; ++i){ int index = size*i + b*l.outputs; softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1); } } } #endif if(!state.train) return; memset(l.delta, 0, l.outputs * l.batch * sizeof(float)); float avg_iou = 0; float recall = 0; float avg_cat = 0; float avg_obj = 0; float avg_anyobj = 0; int count = 0; int class_count = 0; *(l.cost) = 0; for (b = 0; b < l.batch; ++b) { if(l.softmax_tree){ int onlyclass_id = 0; for(t = 0; t < l.max_boxes; ++t){ box truth = float_to_box(state.truth + t*5 + b*l.truths); if(!truth.x) break; // continue; int class_id = state.truth[t*5 + b*l.truths + 4]; float maxp = 0; int maxi = 0; if(truth.x > 100000 && truth.y > 100000){ for(n = 0; n < l.n*l.w*l.h; ++n){ int index = size*n + b*l.outputs + 5; float scale = l.output[index-1]; float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id); if(p > maxp){ maxp = p; maxi = n; } } int index = size*maxi + b*l.outputs + 5; delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss); ++class_count; onlyclass_id = 1; break; } } if(onlyclass_id) continue; } for (j = 0; j < l.h; ++j) { for (i = 0; i < l.w; ++i) { for (n = 0; n < l.n; ++n) { int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs; box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h); float best_iou = 0; int best_class_id = -1; for(t = 0; t < l.max_boxes; ++t){ box truth = float_to_box(state.truth + t*5 + b*l.truths); int 
class_id = state.truth[t * 5 + b*l.truths + 4]; if (class_id >= l.classes) continue; // if label contains class_id more than number of classes in the cfg-file if(!truth.x) break; // continue; float iou = box_iou(pred, truth); if (iou > best_iou) { best_class_id = state.truth[t*5 + b*l.truths + 4]; best_iou = iou; } } avg_anyobj += l.output[index + 4]; l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4])); if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4])); else{ if (best_iou > l.thresh) { l.delta[index + 4] = 0; if(l.classfix > 0){ delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat, l.focal_loss); ++class_count; } } } if(*(state.net.seen) < 12800){ box truth = {0}; truth.x = (i + .5)/l.w; truth.y = (j + .5)/l.h; truth.w = l.biases[2*n]; truth.h = l.biases[2*n+1]; if(DOABS){ truth.w = l.biases[2*n]/l.w; truth.h = l.biases[2*n+1]/l.h; } delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01); } } } } for(t = 0; t < l.max_boxes; ++t){ box truth = float_to_box(state.truth + t*5 + b*l.truths); int class_id = state.truth[t * 5 + b*l.truths + 4]; if (class_id >= l.classes) { printf(" Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes-1); getchar(); continue; // if label contains class_id more than number of classes in the cfg-file } if(!truth.x) break; // continue; float best_iou = 0; int best_index = 0; int best_n = 0; i = (truth.x * l.w); j = (truth.y * l.h); //printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h); box truth_shift = truth; truth_shift.x = 0; truth_shift.y = 0; //printf("index %d %d\n",i, j); for(n = 0; n < l.n; ++n){ int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs; box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h); if(l.bias_match){ pred.w = l.biases[2*n]; pred.h = l.biases[2*n+1]; if(DOABS){ pred.w = l.biases[2*n]/l.w; pred.h = l.biases[2*n+1]/l.h; } } //printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h); pred.x = 0; pred.y = 0; float iou = box_iou(pred, truth_shift); if (iou > best_iou){ best_index = index; best_iou = iou; best_n = n; } } //printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h); float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale); if(iou > .5) recall += 1; avg_iou += iou; //l.delta[best_index + 4] = iou - l.output[best_index + 4]; avg_obj += l.output[best_index + 4]; l.delta[best_index + 4] = l.object_scale * (1 - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]); if (l.rescore) { l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]); } if (l.map) class_id = l.map[class_id]; delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss); ++count; ++class_count; } } //printf("\n"); #ifndef GPU flatten(l.delta, l.w*l.h, size*l.n, l.batch, 0); #endif *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2); printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, 
count); } void backward_region_layer(const region_layer l, network_state state) { axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1); } void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map) { int i; float *const predictions = l.output; #pragma omp parallel for for (i = 0; i < l.w*l.h; ++i){ int j, n; int row = i / l.w; int col = i % l.w; for(n = 0; n < l.n; ++n){ int index = i*l.n + n; int p_index = index * (l.classes + 5) + 4; float scale = predictions[p_index]; if(l.classfix == -1 && scale < .5) scale = 0; int box_index = index * (l.classes + 5); boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h); boxes[index].x *= w; boxes[index].y *= h; boxes[index].w *= w; boxes[index].h *= h; int class_index = index * (l.classes + 5) + 5; if(l.softmax_tree){ hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0); int found = 0; if(map){ for(j = 0; j < 200; ++j){ float prob = scale*predictions[class_index+map[j]]; probs[index][j] = (prob > thresh) ? prob : 0; } } else { for(j = l.classes - 1; j >= 0; --j){ if(!found && predictions[class_index + j] > .5){ found = 1; } else { predictions[class_index + j] = 0; } float prob = predictions[class_index+j]; probs[index][j] = (scale > thresh) ? prob : 0; } } } else { for(j = 0; j < l.classes; ++j){ float prob = scale*predictions[class_index+j]; probs[index][j] = (prob > thresh) ? prob : 0; } } if(only_objectness){ probs[index][0] = scale; } } } } #ifdef GPU void forward_region_layer_gpu(const region_layer l, network_state state) { /* if(!state.train){ copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1); return; } */ flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu); if(l.softmax_tree){ int i; int count = 5; for (i = 0; i < l.softmax_tree->groups; ++i) { int group_size = l.softmax_tree->group_size[i]; softmax_gpu(l.output_gpu+count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count); count += group_size; } }else if (l.softmax){ softmax_gpu(l.output_gpu+5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5); } float* in_cpu = (float*)calloc(l.batch * l.inputs, sizeof(float)); float *truth_cpu = 0; if(state.truth){ int num_truth = l.batch*l.truths; truth_cpu = (float*)calloc(num_truth, sizeof(float)); cuda_pull_array(state.truth, truth_cpu, num_truth); } cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs); //cudaStreamSynchronize(get_cuda_stream()); network_state cpu_state = state; cpu_state.train = state.train; cpu_state.truth = truth_cpu; cpu_state.input = in_cpu; forward_region_layer(l, cpu_state); //cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs); free(cpu_state.input); if(!state.train) return; cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs); //cudaStreamSynchronize(get_cuda_stream()); if(cpu_state.truth) free(cpu_state.truth); } void backward_region_layer_gpu(region_layer l, network_state state) { flatten_ongpu(l.delta_gpu, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 0, state.delta); } #endif void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative) { int i; int new_w = 0; int new_h = 0; if (((float)netw / w) < ((float)neth / h)) { new_w = netw; new_h = (h * netw) / w; } else { new_h = neth; new_w = (w * neth) / h; } for (i = 0; i < n; ++i) { box b = dets[i].bbox; b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw); b.y = (b.y - (neth - new_h) / 2. 
/ neth) / ((float)new_h / neth); b.w *= (float)netw / new_w; b.h *= (float)neth / new_h; if (!relative) { b.x *= w; b.w *= w; b.y *= h; b.h *= h; } dets[i].bbox = b; } } void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets) { int i, j, n, z; float *predictions = l.output; if (l.batch == 2) { float *flip = l.output + l.outputs; for (j = 0; j < l.h; ++j) { for (i = 0; i < l.w / 2; ++i) { for (n = 0; n < l.n; ++n) { for (z = 0; z < l.classes + l.coords + 1; ++z) { int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i; int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1); float swap = flip[i1]; flip[i1] = flip[i2]; flip[i2] = swap; if (z == 0) { flip[i1] = -flip[i1]; flip[i2] = -flip[i2]; } } } } } for (i = 0; i < l.outputs; ++i) { l.output[i] = (l.output[i] + flip[i]) / 2.; } } for (i = 0; i < l.w*l.h; ++i) { int row = i / l.w; int col = i % l.w; for (n = 0; n < l.n; ++n) { int index = n*l.w*l.h + i; for (j = 0; j < l.classes; ++j) { dets[index].prob[j] = 0; } int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords); int box_index = entry_index(l, 0, n*l.w*l.h + i, 0); int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4); float scale = l.background ? 1 : predictions[obj_index]; dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);// , l.w*l.h); dets[index].objectness = scale > thresh ? scale : 0; if (dets[index].mask) { for (j = 0; j < l.coords - 4; ++j) { dets[index].mask[j] = l.output[mask_index + j*l.w*l.h]; } } int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background); if (l.softmax_tree) { hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);// , l.w*l.h); if (map) { for (j = 0; j < 200; ++j) { int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]); float prob = scale*predictions[class_index]; dets[index].prob[j] = (prob > thresh) ? prob : 0; } } else { int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h); dets[index].prob[j] = (scale > thresh) ? scale : 0; } } else { if (dets[index].objectness) { for (j = 0; j < l.classes; ++j) { int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j); float prob = scale*predictions[class_index]; dets[index].prob[j] = (prob > thresh) ? prob : 0; } } } } } correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative); } void zero_objectness(layer l) { int i, n; for (i = 0; i < l.w*l.h; ++i) { for (n = 0; n < l.n; ++n) { int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords); l.output[obj_index] = 0; } } }
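/*
 * Minimal standalone sketch (not part of the original file) of the box
 * decoding that get_region_box() above performs with DOABS == 1: raw network
 * outputs (tx, ty, tw, th) become a box normalized to [0,1] relative to the
 * grid_w x grid_h feature map. demo_logistic mirrors logistic_activate(),
 * and anchor_w/anchor_h stand in for the l.biases pair of anchor n; all
 * demo_* names are illustrative, not darknet API.
 */
#include <math.h>

typedef struct { float x, y, w, h; } demo_box;

static float demo_logistic(float v) { return 1.f / (1.f + expf(-v)); }

static demo_box demo_decode_box(float tx, float ty, float tw, float th,
                                float anchor_w, float anchor_h,
                                int col, int row, int grid_w, int grid_h)
{
    demo_box b;
    b.x = (col + demo_logistic(tx)) / grid_w;   /* cell offset plus sigmoid */
    b.y = (row + demo_logistic(ty)) / grid_h;
    b.w = expf(tw) * anchor_w / grid_w;         /* DOABS branch: divide by grid */
    b.h = expf(th) * anchor_h / grid_h;
    return b;
}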
graph_verifier.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef GRAPH_H_ #define GRAPH_H_ #include <cinttypes> #include <iostream> #include <type_traits> #include "pvector.h" #include "util.h" /* GAP Benchmark Suite Class: CSRGraph Author: Scott Beamer Simple container for graph in CSR format - Intended to be constructed by a Builder - To make weighted, set DestID_ template type to NodeWeight - MakeInverse parameter controls whether graph stores its inverse */ // Used to hold node & weight, with another node it makes a weighted edge template <typename NodeID_, typename WeightT_> struct NodeWeight { NodeID_ v; WeightT_ w; NodeWeight() {} NodeWeight(NodeID_ v) : v(v), w(1) {} NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {} bool operator< (const NodeWeight& rhs) const { return v == rhs.v ? w < rhs.w : v < rhs.v; } // doesn't check WeightT_s, needed to remove duplicate edges bool operator== (const NodeWeight& rhs) const { return v == rhs.v; } // doesn't check WeightT_s, needed to remove self edges bool operator== (const NodeID_& rhs) const { return v == rhs; } operator NodeID_() { return v; } }; template <typename NodeID_, typename WeightT_> std::ostream& operator<<(std::ostream& os, const NodeWeight<NodeID_, WeightT_>& nw) { os << nw.v << " " << nw.w; return os; } template <typename NodeID_, typename WeightT_> std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) { is >> nw.v >> nw.w; return is; } // Syntactic sugar for an edge template <typename SrcT, typename DstT = SrcT> struct EdgePair { SrcT u; DstT v; EdgePair() {} EdgePair(SrcT u, DstT v) : u(u), v(v) {} }; // SG = serialized graph, these types are for writing graph to file typedef int32_t SGID; typedef EdgePair<SGID> SGEdge; typedef int64_t SGOffset; template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true> class CSRGraph { // Used to access neighbors of vertex, basically sugar for iterators class Neighborhood { NodeID_ n_; DestID_** g_index_; public: Neighborhood(NodeID_ n, DestID_** g_index) : n_(n), g_index_(g_index) {} typedef DestID_* iterator; iterator begin() { return g_index_[n_]; } iterator end() { return g_index_[n_+1]; } }; void ReleaseResources() { //added a second condition to prevent double free (transpose graphs) if (out_index_ != nullptr) delete[] out_index_; if (out_neighbors_ != nullptr) delete[] out_neighbors_; if (directed_) { if (in_index_ != nullptr) delete[] in_index_; if (in_neighbors_ != nullptr) delete[] in_neighbors_; } if (flags_ != nullptr) delete[] flags_; } public: CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1), out_index_(nullptr), out_neighbors_(nullptr), in_index_(nullptr), in_neighbors_(nullptr), flags_(nullptr), offsets_(nullptr), is_transpose_(false) {} CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) : directed_(false), num_nodes_(num_nodes), out_index_(index), out_neighbors_(neighs), in_index_(index), in_neighbors_(neighs), is_transpose_(false) { num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2; //adding flags used for deduplication flags_ = new int[num_nodes_]; //adding offsets for load balance scheme SetUpOffsets(true); } CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs, DestID_** in_index, DestID_* in_neighs) : directed_(true), num_nodes_(num_nodes), out_index_(out_index), out_neighbors_(out_neighs), in_index_(in_index), in_neighbors_(in_neighs) , is_transpose_(false){ num_edges_ = out_index_[num_nodes_] - out_index_[0]; flags_ = new int[num_nodes_]; SetUpOffsets(true); } CSRGraph(int64_t num_nodes, 
DestID_** out_index, DestID_* out_neighs, DestID_** in_index, DestID_* in_neighs, bool is_transpose) : directed_(true), num_nodes_(num_nodes), out_index_(out_index), out_neighbors_(out_neighs), in_index_(in_index), in_neighbors_(in_neighs) , is_transpose_(is_transpose){ num_edges_ = out_index_[num_nodes_] - out_index_[0]; flags_ = new int[num_nodes_]; SetUpOffsets(true); } /* the move operations also adopt flags_, offsets_, and is_transpose_, so the moved-to graph never reads uninitialized members in ReleaseResources() */ CSRGraph(CSRGraph&& other) : directed_(other.directed_), num_nodes_(other.num_nodes_), num_edges_(other.num_edges_), out_index_(other.out_index_), out_neighbors_(other.out_neighbors_), in_index_(other.in_index_), in_neighbors_(other.in_neighbors_), flags_(other.flags_), offsets_(other.offsets_), is_transpose_(other.is_transpose_) { other.num_edges_ = -1; other.num_nodes_ = -1; other.out_index_ = nullptr; other.out_neighbors_ = nullptr; other.in_index_ = nullptr; other.in_neighbors_ = nullptr; other.flags_ = nullptr; other.offsets_ = nullptr; } ~CSRGraph() { if (!is_transpose_) ReleaseResources(); } CSRGraph& operator=(CSRGraph&& other) { if (this != &other) { if (!is_transpose_) ReleaseResources(); directed_ = other.directed_; num_edges_ = other.num_edges_; num_nodes_ = other.num_nodes_; out_index_ = other.out_index_; out_neighbors_ = other.out_neighbors_; in_index_ = other.in_index_; in_neighbors_ = other.in_neighbors_; flags_ = other.flags_; offsets_ = other.offsets_; is_transpose_ = other.is_transpose_; other.num_edges_ = -1; other.num_nodes_ = -1; other.out_index_ = nullptr; other.out_neighbors_ = nullptr; other.in_index_ = nullptr; other.in_neighbors_ = nullptr; other.flags_ = nullptr; other.offsets_ = nullptr; } return *this; } bool directed() const { return directed_; } int64_t num_nodes() const { return num_nodes_; } int64_t num_edges() const { return num_edges_; } int64_t num_edges_directed() const { return directed_ ? num_edges_ : 2*num_edges_; } int64_t out_degree(NodeID_ v) const { return out_index_[v+1] - out_index_[v]; } int64_t in_degree(NodeID_ v) const { static_assert(MakeInverse, "Graph inversion disabled but reading inverse"); return in_index_[v+1] - in_index_[v]; } Neighborhood out_neigh(NodeID_ n) const { return Neighborhood(n, out_index_); } Neighborhood in_neigh(NodeID_ n) const { static_assert(MakeInverse, "Graph inversion disabled but reading inverse"); return Neighborhood(n, in_index_); } void PrintStats() const { std::cout << "Graph has " << num_nodes_ << " nodes and " << num_edges_ << " "; if (!directed_) std::cout << "un"; std::cout << "directed edges for degree: "; std::cout << num_edges_/num_nodes_ << std::endl; } void PrintTopology() const { for (NodeID_ i=0; i < num_nodes_; i++) { std::cout << i << ": "; for (DestID_ j : out_neigh(i)) { std::cout << j << " "; } std::cout << std::endl; } } static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) { NodeID_ length = offsets.size(); DestID_** index = new DestID_*[length]; #pragma omp parallel for for (NodeID_ n=0; n < length; n++) index[n] = neighs + offsets[n]; return index; } pvector<SGOffset> VertexOffsets(bool in_graph = false) const { pvector<SGOffset> offsets(num_nodes_+1); for (NodeID_ n=0; n < num_nodes_+1; n++) if (in_graph) offsets[n] = in_index_[n] - in_index_[0]; else offsets[n] = out_index_[n] - out_index_[0]; return offsets; } void SetUpOffsets(bool in_graph = false) { offsets_ = new SGOffset[num_nodes_+1]; for (NodeID_ n=0; n < num_nodes_+1; n++) if (in_graph) offsets_[n] = in_index_[n] - in_index_[0]; else offsets_[n] = out_index_[n] - out_index_[0]; } Range<NodeID_> vertices() const { return Range<NodeID_>(num_nodes()); } //useful for deduplication int* flags_; SGOffset * offsets_; //we are making these fields public for transpose and other 
operations on graphs //private: bool directed_; int64_t num_nodes_; int64_t num_edges_; DestID_** out_index_; DestID_* out_neighbors_; DestID_** in_index_; DestID_* in_neighbors_; bool is_transpose_; }; #endif // GRAPH_H_
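// Illustrative usage sketch (not part of the original header): building a
// tiny undirected CSRGraph straight from raw CSR arrays. In the GAP suite a
// Builder normally produces these arrays; the hand-built layout below only
// shows the index/neighbor shape the constructor expects. The graph adopts
// (and later delete[]s) both arrays.
inline void csr_graph_demo() {
  const int64_t n = 3;
  // Undirected edges 0-1 and 1-2, stored once per direction: 4 entries.
  int* neighs = new int[4]{1, 0, 2, 1};
  int** index = new int*[n + 1];
  index[0] = neighs + 0;  // node 0 -> {1}
  index[1] = neighs + 1;  // node 1 -> {0, 2}
  index[2] = neighs + 3;  // node 2 -> {1}
  index[3] = neighs + 4;  // one-past-the-end sentinel
  CSRGraph<int> g(n, index, neighs);  // num_edges() == 4/2 == 2
  g.PrintStats();                     // 3 nodes, 2 undirected edges
  g.PrintTopology();                  // adjacency list of each node
}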
GB_binop__bset_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bset_int64 // A.*B function (eWiseMult): GB_AemultB__bset_int64 // A*D function (colscale): (none) // D*A function (rowscale): (none) // C+=B function (dense accum): GB_Cdense_accumB__bset_int64 // C+=b function (dense accum): GB_Cdense_accumb__bset_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_int64 // C=scalar+B GB_bind1st__bset_int64 // C=scalar+B' GB_bind1st_tran__bset_int64 // C=A+scalar GB_bind2nd__bset_int64 // C=A'+scalar GB_bind2nd_tran__bset_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = GB_BITSET (aij, bij, int64_t, 64) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITSET (x, y, int64_t, 64) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_INT64 || GxB_NO_BSET_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bset_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bset_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bset_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bset_int64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bset_int64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bset_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = Bx [p] ; Cx [p] = GB_BITSET (x, bij, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bset_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = Ax [p] ; Cx [p] = GB_BITSET (aij, y, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (x, aij, int64_t, 64) ; \ } GrB_Info 
GB_bind1st_tran__bset_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (aij, y, int64_t, 64) ; \ } GrB_Info GB_bind2nd_tran__bset_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
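/*
 * Minimal standalone sketch (not from the GraphBLAS source) of the bind2nd
 * pattern the kernels above use: a binary operator f(x,y) becomes a unary
 * operator by fixing y to a scalar, then is applied elementwise, honoring an
 * optional bitmap Ab that marks which entries are present. demo_bitset is
 * only a stand-in for GB_BITSET; the real macro lives in the GraphBLAS
 * headers and its bit-numbering may differ.
 */
#include <stdint.h>
#include <stddef.h>

static inline int64_t demo_bitset (int64_t x, int64_t y)
{
    /* assumption: set one bit of x, selected by y wrapped into 0..63 */
    return x | (((int64_t) 1) << (((uint64_t) y) & 63)) ;
}

static void demo_bind2nd (int64_t *Cx, const int64_t *Ax,
                          const int8_t *Ab, int64_t anz, int64_t y)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present */
        Cx [p] = demo_bitset (Ax [p], y) ;      /* cij = f (aij, y) */
    }
}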
ssnet.c
#include<stdio.h> #include <stdlib.h> #include <time.h> #include "omp.h" /* OpenMP implementation example Details of implementation/tutorial can be found here: http://madhugnadig.com/articles/parallel-processing/2017/02/25/parallel-computing-in-c-using-openMP.html */ clock_t t, end; double cpu_time_used; // Structure for enabling reduction on the index of elements struct Compare { int val; int index; }; // Custom reduction for finding the index of the max element. #pragma omp declare reduction(maximum : struct Compare : omp_out = omp_in.val > omp_out.val ? omp_in : omp_out) int count=0; int *swaparray; void swap(int* a, int* b); void selectionSort(int* A, int n); void verify(int* A, int n); int* f_read(FILE *in) { int i=0,j=0; int *a; char buf[10]; int c; /* fgetc returns int; a char here would break the EOF comparison */ while((c=fgetc(in))!=EOF) { if(c==' ') { count++; } } //count++; printf("count = %d",count); a=(int *)malloc(count * sizeof(int)); swaparray=(int *)malloc(count * sizeof(int)); fseek(in,0,SEEK_SET); while((c=fgetc(in))!=EOF) { if(c==' ') { buf[j]='\0'; a[i++]=atoi(buf); j=0; } else { buf[j++]=c; } } /*buf[j]='\0'; a[i]=atoi(buf);*/ /*for(i=0;i<count;i++) { printf("%d ",a[i]); }*/ return a; } int main(){ int number, iter =0; int* Arr; char buf[10]; FILE *in=fopen("index8.txt","rb"); FILE *out=fopen("insorted8.txt","wb"); FILE *out1=fopen("inindex8.txt","wb"); //scanf("%d", &number); //Arr = (int *)malloc( number * sizeof(int)); /*for(; iter<number; iter++){ //scanf("%d", &Arr[iter]); Arr[iter]=iter; }*/ Arr=f_read(in); number = count; t = clock(); selectionSort(Arr, number); t = clock()-t; for(iter=0; iter<number;iter++){ //printf("%d ", Arr[iter]); sprintf(buf,"%d",Arr[iter]); /* sprintf instead of the non-standard itoa */ fputs(buf,out); fputc(' ',out); sprintf(buf,"%d",swaparray[iter]); fputs(buf,out1); fputc(' ',out1); } cpu_time_used = ((double)t)/CLOCKS_PER_SEC; // Verify that the algorithm sorted the array as expected verify(Arr, number); printf("\nTime taken for sort: %lf\n", cpu_time_used); return 0; } void selectionSort(int* A, int n){ int startpos,i; for(startpos =0; startpos < n; startpos++){ // Declare the structure required for reduction struct Compare max; // Initialize the variables max.val = A[startpos]; max.index = startpos; printf("%d ",startpos); // Parallel for loop with custom reduction, at the end of the loop, max will have the max element and its index. #pragma omp parallel for reduction(maximum:max) for(i=startpos +1; i< n; ++i){ if(A[i] > max.val){ max.val = A[i]; max.index = i; } } swap(&A[startpos], &A[max.index]); swaparray[startpos]=max.index-startpos; } } // Verification function void verify(int* A, int n){ int failcount = 0; int iter; for(iter = 0; iter < n-1; iter++){ if(A[iter] < A[iter+1]){ failcount++; } } printf("\nFail count: %d\n", failcount); } //Swap function void swap(int* a, int* b){ int temp = *a; *a = *b; *b = temp; }
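/*
 * Standalone sketch (not part of the original program) of the same argmax
 * reduction with an explicit initializer clause. Without initializer(...),
 * each thread's private copy of the struct is initialized as if it had
 * static storage duration (all zeros), which is only safe for non-negative
 * data; initializer(omp_priv = omp_orig) seeds every private copy from the
 * original value instead. The ArgMax/demo_* names are illustrative.
 */
#include <omp.h>

struct ArgMax { int val; int index; };

#pragma omp declare reduction(argmax : struct ArgMax : omp_out = omp_in.val > omp_out.val ? omp_in : omp_out) initializer(omp_priv = omp_orig)

static struct ArgMax demo_argmax(const int* A, int n)
{
    struct ArgMax m;
    int i;
    m.val = A[0];   /* assumes n >= 1 */
    m.index = 0;
    #pragma omp parallel for reduction(argmax : m)
    for (i = 1; i < n; ++i) {
        if (A[i] > m.val) { m.val = A[i]; m.index = i; }
    }
    return m;
}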
run_network.c
#include "const.def" #include <math.h> #include <stdlib.h> #include <stdio.h> #include <omp.h> #include "run_network.h" #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? (x) : (y)) bits_t integer_bits_per_act_layer[] = { 8, //meaned input image 10, //conv1 11,11, //fire2 .. 11,11, 11,11, 11,10, 11,11, 10,10, 9,8, 8,7, 5,4, 3,2, 5, 0};//last is softmax_layer bits_t integer_bits_per_layer[] = { 0,-1, //conv1: conv,bias 1,-1,0,-2,0,-3, 0,-1,0,-1,0,-2, 0,-2,0,-3,0,-4, 0,-2,0,-2,0,-4, 0,-2,-1,-3,0,-4, 0,-2,-1,-1,-1,-3, 0,3,0,0,-1,-1, -1,1,-1,-3,-1,-3, -2,-1,-3,-3,-2,-1, -1,0,-2,-2,-2,-1, -1,1}; int run_network(net_model * model, activation_t * image, activation_t * fm_buf_1, activation_t * fm_buf_2) { int in_w, in_h; int out_w, out_h; double begin, end; //#pragma omp parallel private(in_w, in_h, out_w, out_h,begin,end) shared (model, image, fm_buf_1, fm_buf_2) num_threads(4) { // Convolution 1 out_w = (int) ceilf((IMG_WIDTH - 3 + 1) / 2.0); out_h = (int) ceilf((IMG_HEIGHT - 3 + 1) / 2.0); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).conv1, (*model).conv1_bias, 3, 3, 64, 0, IMG_WIDTH, IMG_HEIGHT, out_w, out_h, image, fm_buf_1,2,1,0,0, integer_bits_per_act_layer[0],integer_bits_per_act_layer[1], integer_bits_per_layer[0],integer_bits_per_layer[1]); end = omp_get_wtime(); printf("L1 Wall time for 3x3 convolution is %2.6f\n", end-begin); //Pooling L 1 in_w = out_w; in_h = out_h; out_w = (int) ceilf((in_w - 3 + 1) / 2.0); out_h = (int) ceilf((in_h - 3 + 1) / 2.0); begin = omp_get_wtime(); pooling(in_w, in_h, out_w, out_h, 64, fm_buf_1, fm_buf_2); end = omp_get_wtime(); printf("L1 Wall time for Pooling is %2.6f\n", end-begin); //Fire 2 in_w = out_w; in_h = out_h; begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire2.sq1_1, (*model).fire2.sq1_1_bias, 1, 64, 16, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,0,0, integer_bits_per_act_layer[1],integer_bits_per_act_layer[2], integer_bits_per_layer[2],integer_bits_per_layer[3]); end = omp_get_wtime(); printf("L2 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire2.ex1_1, (*model).fire2.ex1_1_bias, 1, 16, 64, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,0, integer_bits_per_act_layer[2],integer_bits_per_act_layer[3], integer_bits_per_layer[4],integer_bits_per_layer[5]); end = omp_get_wtime(); printf("L2 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire2.ex3_3, (*model).fire2.ex3_3_bias, 3, 16, 64, 1, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,64, integer_bits_per_act_layer[2],integer_bits_per_act_layer[3], integer_bits_per_layer[6],integer_bits_per_layer[7]); end = omp_get_wtime(); printf("L2 Wall time for Ex3x3 is %2.6f\n", end-begin); //Fire 3 begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire3.sq1_1, (*model).fire3.sq1_1_bias, 1, 128, 16, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,0,0, integer_bits_per_act_layer[3],integer_bits_per_act_layer[4], integer_bits_per_layer[8],integer_bits_per_layer[9]); end = omp_get_wtime(); printf("L3 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire3.ex1_1, (*model).fire3.ex1_1_bias, 1, 16, 64, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,0, integer_bits_per_act_layer[4],integer_bits_per_act_layer[5], integer_bits_per_layer[10],integer_bits_per_layer[11]); end = 
omp_get_wtime(); printf("L3 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire3.ex3_3, (*model).fire3.ex3_3_bias, 3, 16, 64, 1, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,64, integer_bits_per_act_layer[4],integer_bits_per_act_layer[5], integer_bits_per_layer[12],integer_bits_per_layer[13]); end = omp_get_wtime(); printf("L3 Wall time for Ex3x3 is %2.6f\n", end-begin); // Pooling L 3 out_w = (int) ceilf((in_w - 3 + 1) / 2.0); out_h = (int) ceilf((in_h - 3 + 1) / 2.0); pooling(in_w, in_h, out_w, out_h, 128, fm_buf_2, fm_buf_1); //Fire 4 in_w = out_w; in_h = out_h; begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire4.sq1_1, (*model).fire4.sq1_1_bias, 1, 128, 32, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,0,0, integer_bits_per_act_layer[5],integer_bits_per_act_layer[6], integer_bits_per_layer[14],integer_bits_per_layer[15]); end = omp_get_wtime(); printf("L4 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire4.ex1_1, (*model).fire4.ex1_1_bias, 1, 32, 128, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,1,0, integer_bits_per_act_layer[6],integer_bits_per_act_layer[7], integer_bits_per_layer[16],integer_bits_per_layer[17]); end = omp_get_wtime(); printf("L4 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire4.ex3_3, (*model).fire4.ex3_3_bias, 3, 32, 128, 1, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,1,128, integer_bits_per_act_layer[6],integer_bits_per_act_layer[7], integer_bits_per_layer[18],integer_bits_per_layer[19]); end = omp_get_wtime(); printf("L4 Wall time for Ex3x3 is %2.6f\n", end-begin); //Fire 5 begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire5.sq1_1, (*model).fire5.sq1_1_bias, 1, 256, 32, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,0,0, integer_bits_per_act_layer[7],integer_bits_per_act_layer[8], integer_bits_per_layer[20],integer_bits_per_layer[21]); end = omp_get_wtime(); printf("L5 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire5.ex1_1, (*model).fire5.ex1_1_bias, 1, 32, 128, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,1,0, integer_bits_per_act_layer[8],integer_bits_per_act_layer[9], integer_bits_per_layer[22],integer_bits_per_layer[23]); end = omp_get_wtime(); printf("L5 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire5.ex3_3, (*model).fire5.ex3_3_bias, 3, 32, 128, 1, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,1,128, integer_bits_per_act_layer[8],integer_bits_per_act_layer[9], integer_bits_per_layer[24],integer_bits_per_layer[25]); end = omp_get_wtime(); printf("L5 Wall time for Ex3x3 is %2.6f\n", end-begin); // Pooling L 5 out_w = (int) ceilf((in_w - 3 + 1) / 2.0); out_h = (int) ceilf((in_h - 3 + 1) / 2.0); pooling(in_w, in_h, out_w, out_h, 256, fm_buf_1, fm_buf_2); //Fire 6 in_w = out_w; in_h = out_h; begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire6.sq1_1, (*model).fire6.sq1_1_bias, 1, 256, 48, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,0,0, integer_bits_per_act_layer[9],integer_bits_per_act_layer[10], integer_bits_per_layer[26],integer_bits_per_layer[27]); end = omp_get_wtime(); printf("L6 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t 
*)(*model).fire6.ex1_1, (*model).fire6.ex1_1_bias, 1, 48, 192, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,0, integer_bits_per_act_layer[10],integer_bits_per_act_layer[11], integer_bits_per_layer[28],integer_bits_per_layer[29]); end = omp_get_wtime(); printf("L6 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire6.ex3_3, (*model).fire6.ex3_3_bias, 3, 48, 192, 1, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,192, integer_bits_per_act_layer[10],integer_bits_per_act_layer[11], integer_bits_per_layer[30],integer_bits_per_layer[31]); end = omp_get_wtime(); printf("L6 Wall time for Ex3x3 is %2.6f\n", end-begin); //Fire 7 begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire7.sq1_1, (*model).fire7.sq1_1_bias, 1, 384, 48, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,0,0, integer_bits_per_act_layer[11],integer_bits_per_act_layer[12], integer_bits_per_layer[32],integer_bits_per_layer[33]); end = omp_get_wtime(); printf("L7 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire7.ex1_1, (*model).fire7.ex1_1_bias, 1, 48, 192, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,0, integer_bits_per_act_layer[12],integer_bits_per_act_layer[13], integer_bits_per_layer[34],integer_bits_per_layer[35]); end = omp_get_wtime(); printf("L7 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire7.ex3_3, (*model).fire7.ex3_3_bias, 3, 48, 192, 1, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,192, integer_bits_per_act_layer[12],integer_bits_per_act_layer[13], integer_bits_per_layer[36],integer_bits_per_layer[37]); end = omp_get_wtime(); printf("L7 Wall time for Ex3x3 is %2.6f\n", end-begin); //Fire 8 begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire8.sq1_1, (*model).fire8.sq1_1_bias, 1, 384, 64, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,0,0, integer_bits_per_act_layer[13],integer_bits_per_act_layer[14], integer_bits_per_layer[38],integer_bits_per_layer[39]); end = omp_get_wtime(); printf("L8 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire8.ex1_1, (*model).fire8.ex1_1_bias, 1, 64, 256, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,0, integer_bits_per_act_layer[14],integer_bits_per_act_layer[15], integer_bits_per_layer[40],integer_bits_per_layer[41]); end = omp_get_wtime(); printf("L8 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire8.ex3_3, (*model).fire8.ex3_3_bias, 3, 64, 256, 1, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,256, integer_bits_per_act_layer[14],integer_bits_per_act_layer[15], integer_bits_per_layer[42],integer_bits_per_layer[43]); end = omp_get_wtime(); printf("L8 Wall time for Ex3x3 is %2.6f\n", end-begin); //Fire 9 begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire9.sq1_1, (*model).fire9.sq1_1_bias, 1, 512, 64, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,0,0, integer_bits_per_act_layer[15],integer_bits_per_act_layer[16], integer_bits_per_layer[44],integer_bits_per_layer[45]); end = omp_get_wtime(); printf("L9 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire9.ex1_1, (*model).fire9.ex1_1_bias, 1, 64, 256, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,0, 
integer_bits_per_act_layer[16],integer_bits_per_act_layer[17], integer_bits_per_layer[46],integer_bits_per_layer[47]); end = omp_get_wtime(); printf("L9 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire9.ex3_3, (*model).fire9.ex3_3_bias, 3, 64, 256, 1, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,256, integer_bits_per_act_layer[16],integer_bits_per_act_layer[17], integer_bits_per_layer[48],integer_bits_per_layer[49]); end = omp_get_wtime(); printf("L9 Wall time for Ex3x3 is %2.6f\n", end-begin); //Fire 10 begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire10.sq1_1, (*model).fire10.sq1_1_bias, 1, 512, 96, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,0,0, integer_bits_per_act_layer[17],integer_bits_per_act_layer[18], integer_bits_per_layer[50],integer_bits_per_layer[51]); end = omp_get_wtime(); printf("L10 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire10.ex1_1, (*model).fire10.ex1_1_bias, 1, 96, 384, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,0, integer_bits_per_act_layer[18],integer_bits_per_act_layer[19], integer_bits_per_layer[52],integer_bits_per_layer[53]); end = omp_get_wtime(); printf("L10 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire10.ex3_3, (*model).fire10.ex3_3_bias, 3, 96, 384, 1, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,384, integer_bits_per_act_layer[18],integer_bits_per_act_layer[19], integer_bits_per_layer[54],integer_bits_per_layer[55]); end = omp_get_wtime(); printf("L10 Wall time for Ex3x3 is %2.6f\n", end-begin); //Fire 11 begin = omp_get_wtime(); perform_convolution_1x1_sq((model_t *)(*model).fire11.sq1_1, (*model).fire11.sq1_1_bias, 1, 768, 96, 0, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1,1,1,0,0, integer_bits_per_act_layer[19],integer_bits_per_act_layer[20], integer_bits_per_layer[56],integer_bits_per_layer[57]); end = omp_get_wtime(); printf("L11 Wall time for Sq1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_1x1_ex((model_t *)(*model).fire11.ex1_1, (*model).fire11.ex1_1_bias, 1, 96, 384, 0, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,0, integer_bits_per_act_layer[20],integer_bits_per_act_layer[21], integer_bits_per_layer[58],integer_bits_per_layer[59]); end = omp_get_wtime(); printf("L11 Wall time for Ex1x1 is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_convolution_3x3((model_t *)(*model).fire11.ex3_3, (*model).fire11.ex3_3_bias, 3, 96, 384, 1, in_w, in_h, out_w, out_h, fm_buf_1, fm_buf_2,1,1,1,384, integer_bits_per_act_layer[20],integer_bits_per_act_layer[21], integer_bits_per_layer[60],integer_bits_per_layer[61]); end = omp_get_wtime(); printf("L11 Wall time for Ex3x3 is %2.6f\n", end-begin); //ConvDet begin = omp_get_wtime(); perform_convDet((model_t *)(*model).convDet, (*model).convDet_bias, 3, 768, CONVDET_CHANNELS, in_w, in_h, out_w, out_h, fm_buf_2, fm_buf_1, integer_bits_per_act_layer[21],integer_bits_per_act_layer[22], integer_bits_per_layer[62],integer_bits_per_layer[63]); end = omp_get_wtime(); printf("L12 Wall time for ConvDet is %2.6f\n", end-begin); begin = omp_get_wtime(); perform_softmax_sigmoid(out_w, out_h, fm_buf_1, integer_bits_per_act_layer[22], integer_bits_per_act_layer[23]); end = omp_get_wtime(); printf("L12 Wall time for Softmax/Sigmoid is %2.6f\n", end-begin); } return out_w*out_h; } //////////////// ////Functions 
//////////////// //identical convolutions with different names in order to differentiate in tracing // Convolution with Relu and custom filtersize(1 or 3)/ padding / stride and offset int perform_convolution_3x3( model_t * __restrict__ conv_params, model_t * __restrict__ bias_params, int kernel_size, int input_channels, int output_channels, int padding, int inp_w, int inp_h, int outp_w, int outp_h, activation_t * __restrict__ inp_buf, activation_t * __restrict__ outp_buf, int stride, int relu, int is_fire_expand_layer, int outp_channel_offset, bits_t inp_act_int_bits, bits_t outp_act_int_bits, bits_t conv_int_bits, bits_t bias_int_bits){ int i,j,k,l,p,q; const int half_kernelsize = (int)floorf(kernel_size/2.0); const int start = half_kernelsize-padding; const bits_t acc_frac_bits = (QUANT_BITS-inp_act_int_bits) + (QUANT_BITS-conv_int_bits); const bits_t bias_rightshift_bits = (QUANT_BITS-bias_int_bits)-acc_frac_bits; const bits_t out_rightshift_bits = acc_frac_bits - (QUANT_BITS - outp_act_int_bits); //#pragma omp for private(i,j,k,l,p,q) collapse(2) schedule(dynamic) for(i=start;i<outp_w+start;i++){ for(j=start;j<outp_h+start;j++){ int x0,y0; int outp_arr_idx = ((j-start)*outp_w+(i-start))*output_channels*(1+is_fire_expand_layer)+outp_channel_offset; int weight_arr_idx = 0; if(stride == 1){ x0=i; y0=j; } else if (stride == 2){ x0=2*i-1; y0=2*j-1; } for(q=0;q<output_channels;q++){ accum_t res=0; for(k=-half_kernelsize;k<=half_kernelsize;k++){ for(l=-half_kernelsize;l<=half_kernelsize;l++){ int x,y; x=x0+k; y=y0+l; for(p=0;p<input_channels;p++){ if(x>=0 && x<inp_w && y>=0 && y<inp_h) { mult_t mult = conv_params[weight_arr_idx] * inp_buf[y*input_channels*inp_w+x*input_channels+p]; res += mult; } weight_arr_idx++; } } } if(bias_rightshift_bits>0) res += bias_params[q]>>bias_rightshift_bits; else res += ((accum_t)bias_params[q])<<(-bias_rightshift_bits); activation_t quant_res =0; res += 1<<(out_rightshift_bits-1); //add 1 bit for rounding (instead of truncating) quant_res = (activation_t)(res>>out_rightshift_bits); if(res>0 || relu == 0) outp_buf[outp_arr_idx] = quant_res; else outp_buf[outp_arr_idx] = 0; outp_arr_idx++; } } } return 0; } int perform_convolution_1x1_sq( model_t * __restrict__ conv_params, model_t * __restrict__ bias_params, int kernel_size, int input_channels, int output_channels, int padding, int inp_w, int inp_h, int outp_w, int outp_h, activation_t * __restrict__ inp_buf, activation_t * __restrict__ outp_buf, int stride, int relu, int is_fire_expand_layer, int outp_channel_offset, bits_t inp_act_int_bits, bits_t outp_act_int_bits, bits_t conv_int_bits, bits_t bias_int_bits){ int i,j,k,l,p,q; const int half_kernelsize = (int)floorf(kernel_size/2.0); const int start = half_kernelsize-padding; const bits_t acc_frac_bits = (QUANT_BITS-inp_act_int_bits) + (QUANT_BITS-conv_int_bits); const bits_t bias_rightshift_bits = (QUANT_BITS-bias_int_bits)-acc_frac_bits; const bits_t out_rightshift_bits = acc_frac_bits - (QUANT_BITS - outp_act_int_bits); //#pragma omp for private(i,j,k,l,p,q) collapse(2) schedule(dynamic) for(i=start;i<outp_w+start;i++){ for(j=start;j<outp_h+start;j++){ int x0,y0; int outp_arr_idx = ((j-start)*outp_w+(i-start))*output_channels*(1+is_fire_expand_layer)+outp_channel_offset; int weight_arr_idx = 0; if(stride == 1){ x0=i; y0=j; } else if (stride == 2){ x0=2*i-1; y0=2*j-1; } for(q=0;q<output_channels;q++){ accum_t res=0; for(k=-half_kernelsize;k<=half_kernelsize;k++){ for(l=-half_kernelsize;l<=half_kernelsize;l++){ int x,y; x=x0+k; y=y0+l; 
for(p=0;p<input_channels;p++){ if(x>=0 && x<inp_w && y>=0 && y<inp_h) { mult_t mult = conv_params[weight_arr_idx] * inp_buf[y*input_channels*inp_w+x*input_channels+p]; res += mult; } weight_arr_idx++; } } } if(bias_rightshift_bits>0) res += bias_params[q]>>bias_rightshift_bits; else res += ((accum_t)bias_params[q])<<(-bias_rightshift_bits); activation_t quant_res =0; res += 1<<(out_rightshift_bits-1); //add 1 bit for rounding (instead of truncating) quant_res = (activation_t)(res>>out_rightshift_bits); if(res>0 || relu == 0) outp_buf[outp_arr_idx] = quant_res; else outp_buf[outp_arr_idx] = 0; outp_arr_idx++; } } } return 0; } int perform_convolution_1x1_ex( model_t * __restrict__ conv_params, model_t * __restrict__ bias_params, int kernel_size, int input_channels, int output_channels, int padding, int inp_w, int inp_h, int outp_w, int outp_h, activation_t * __restrict__ inp_buf, activation_t * __restrict__ outp_buf, int stride, int relu, int is_fire_expand_layer, int outp_channel_offset, bits_t inp_act_int_bits, bits_t outp_act_int_bits, bits_t conv_int_bits, bits_t bias_int_bits){ int i,j,k,l,p,q; const int half_kernelsize = (int)floorf(kernel_size/2.0); const int start = half_kernelsize-padding; const bits_t acc_frac_bits = (QUANT_BITS-inp_act_int_bits) + (QUANT_BITS-conv_int_bits); const bits_t bias_rightshift_bits = (QUANT_BITS-bias_int_bits)-acc_frac_bits; const bits_t out_rightshift_bits = acc_frac_bits - (QUANT_BITS - outp_act_int_bits); //#pragma omp for private(i,j,k,l,p,q) collapse(2) schedule(dynamic) for(i=start;i<outp_w+start;i++){ for(j=start;j<outp_h+start;j++){ int x0,y0; int outp_arr_idx = ((j-start)*outp_w+(i-start))*output_channels*(1+is_fire_expand_layer)+outp_channel_offset; int weight_arr_idx = 0; if(stride == 1){ x0=i; y0=j; } else if (stride == 2){ x0=2*i-1; y0=2*j-1; } for(q=0;q<output_channels;q++){ accum_t res=0; for(k=-half_kernelsize;k<=half_kernelsize;k++){ for(l=-half_kernelsize;l<=half_kernelsize;l++){ int x,y; x=x0+k; y=y0+l; for(p=0;p<input_channels;p++){ if(x>=0 && x<inp_w && y>=0 && y<inp_h) { mult_t mult = conv_params[weight_arr_idx] * inp_buf[y*input_channels*inp_w+x*input_channels+p]; res += mult; } weight_arr_idx++; } } } if(bias_rightshift_bits>0) res += bias_params[q]>>bias_rightshift_bits; else res += ((accum_t)bias_params[q])<<(-bias_rightshift_bits); activation_t quant_res =0; res += 1<<(out_rightshift_bits-1); //add 1 bit for rounding (instead of truncating) quant_res = (activation_t)(res>>out_rightshift_bits); if(res>0 || relu == 0) outp_buf[outp_arr_idx] = quant_res; else outp_buf[outp_arr_idx] = 0; outp_arr_idx++; } } } return 0; } // Convolution with filtersize 3, padding and stride 1 and float output int perform_convDet( model_t * __restrict__ conv_params, model_t * __restrict__ bias_params, int kernel_size, int input_channels, int output_channels, int inp_w, int inp_h, int outp_w, int outp_h, activation_t * __restrict__ inp_buf, activation_t * __restrict__ outp_buf, bits_t inp_act_int_bits, bits_t outp_act_int_bits, bits_t conv_int_bits, bits_t bias_int_bits){ int i,j,k,l,p,q; const int half_kernelsize = 1; const bits_t acc_frac_bits = (QUANT_BITS-inp_act_int_bits) + (QUANT_BITS-conv_int_bits); const bits_t bias_rightshift_bits = (QUANT_BITS-bias_int_bits)-acc_frac_bits; const bits_t out_rightshift_bits = acc_frac_bits - (QUANT_BITS - outp_act_int_bits); //printf("bias rightshift bits is %d and outact rightshift bits is %d \n",bias_rightshift_bits,out_rightshift_bits); //#pragma omp for private(i,j,k,l,p,q) collapse(2) schedule(dynamic) 
for(i=0;i<outp_w;i++){ for(j=0;j<outp_h;j++){ int outp_arr_idx = (j*outp_w+i)*output_channels; int weight_arr_idx = 0; for(q=0;q<output_channels;q++){ accum_t res=0; for(k=-half_kernelsize;k<=half_kernelsize;k++){ for(l=-half_kernelsize;l<=half_kernelsize;l++){ int x,y; x=i+k; y=j+l; for(p=0;p<input_channels;p++){ if(x>=0 && x<inp_w && y>=0 && y<inp_h) { //mult_t mult = conv_params[((q*kernel_size+(k+half_kernelsize))*kernel_size+ (l+half_kernelsize))*input_channels+p] * inp_buf[y*input_channels*inp_w+x*input_channels+p]; mult_t mult = conv_params[weight_arr_idx] * inp_buf[y*input_channels*inp_w+x*input_channels+p]; res += mult; } weight_arr_idx++; } } } if(bias_rightshift_bits>0) res += bias_params[q]>>bias_rightshift_bits; else res += ((accum_t)bias_params[q])<<(-bias_rightshift_bits); ((finish_t *)outp_buf)[outp_arr_idx] = ((finish_t)res) / (pow(2,(float)(QUANT_BITS-outp_act_int_bits+out_rightshift_bits))); outp_arr_idx++; } } } return 0; } //Max-Pooling without padding: size 3, stride 2 int pooling(int inp_w, int inp_h, int outp_w, int outp_h, int channels, activation_t * __restrict__ inp_buf, activation_t * __restrict__ outp_buf){ int j,i,q,m; //#pragma omp for private(j,i,q,m) collapse(2) schedule(dynamic) for(j=0;j<outp_h;j++){ for(i=0;i<outp_w;i++){ for(q=0;q<channels;q++){ activation_t max_values[3]; activation_t temp; for(m=0;m<3;m++){ if((2*j+m>inp_h)){ max_values[m]=-127; continue; } temp = MAX(inp_buf[((2*j+m)*inp_w+2*i)*channels+q],inp_buf[((2*j+m)*inp_w+2*i+1)*channels+q]); if(2*i+2<inp_w) max_values[m] = MAX(temp,inp_buf[((2*j+m)*inp_w+2*i+2)*channels+q]); else max_values[m] = temp; } outp_buf[(j*outp_w+i)*channels+q] = MAX(max_values[0],MAX(max_values[1],max_values[2])); } } } return 0; } int perform_softmax_sigmoid(int inp_w, int inp_h, activation_t * __restrict__ fm_buf,/* float * int_data, */ bits_t int_bits_convdet, bits_t softmax_bits){ //Sigmoid only on NUM_ANCHOR of objectness scores (confidence) //Softmax only over NUM_ANCHOR * NUM_CLASSES from channels and each only over NUM_CLASSES //including multiplication with sigmoid of objectness(confidence) int pixels, anchors,classes; //#pragma omp for private(pixels, anchors, classes) schedule(dynamic) for(pixels=0;pixels<inp_w*inp_h;pixels++){ for(anchors=0;anchors<NUM_ANCHOR;anchors++){ float sum=0; float sigmoid; float exps[NUM_CLASSES]; //Sigmoid on confidence scores sigmoid = 1/(1+ expf(-((finish_t *)fm_buf)[pixels*CONVDET_CHANNELS+NUM_ANCHOR*NUM_CLASSES+anchors])); //Softmax over classes including multiplication with confidence score for(classes=0;classes<NUM_CLASSES;classes++){ exps[classes] = expf(((finish_t *)fm_buf)[pixels*CONVDET_CHANNELS+anchors*NUM_CLASSES+classes]); sum += exps[classes]; } for(classes=0;classes<NUM_CLASSES;classes++){ ((finish_t *)fm_buf)[pixels*CONVDET_CHANNELS+anchors*NUM_CLASSES+classes] = sigmoid * exps[classes]/sum; } } } return 0; }
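/*
 * Standalone sketch (not part of run_network.c) of the fixed-point
 * bookkeeping the convolution routines above share: activations and weights
 * are Q-format integers with QUANT_BITS total bits, of which int_bits are
 * integer bits. A product therefore carries
 * (QUANT_BITS - a_int) + (QUANT_BITS - w_int) fractional bits, and the
 * accumulator is rounded (add half an LSB) and right-shifted back to the
 * output activation format. The 8-bit widths and demo_* names here are
 * illustrative assumptions, and a positive shift is assumed, as in the
 * kernels above.
 */
#include <stdint.h>

#define DEMO_QUANT_BITS 8

static int8_t demo_requantize(int32_t acc,
                              int a_int_bits,   /* input activation Q-format */
                              int w_int_bits,   /* weight Q-format */
                              int o_int_bits)   /* output activation Q-format */
{
    int acc_frac = (DEMO_QUANT_BITS - a_int_bits)
                 + (DEMO_QUANT_BITS - w_int_bits);
    int shift = acc_frac - (DEMO_QUANT_BITS - o_int_bits);
    acc += (int32_t)1 << (shift - 1);   /* round half up instead of truncating */
    return (int8_t)(acc >> shift);
}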
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + 
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
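The kernel above keeps only two time levels alive by double-buffering on t % 2, and exploits axis symmetry: a single coefficient weights both the -r and +r neighbor along each axis. A minimal self-contained sketch of the same idiom on a 1D radius-4 stencil follows; N, NT, and the coefficient values are made up for illustration, not taken from the benchmark.

#include <stdio.h>

#define N  64
#define R  4   /* stencil radius, matching the 3D kernel's halo of 4 */
#define NT 8

int main(void)
{
    double A[2][N], coef[R + 1];
    int t, k, r;

    for (k = 0; k < N; k++) A[0][k] = (double)(k % 16);   /* fake input */
    for (k = 0; k < N; k++) A[1][k] = A[0][k];            /* keep the halo of the second buffer defined */
    for (r = 0; r <= R; r++) coef[r] = 1.0 / (2 * r + 1); /* fake coefficients */

    for (t = 0; t < NT; t++) {
        for (k = R; k < N - R; k++) {
            double s = coef[0] * A[t % 2][k];
            /* axis symmetry: one coefficient serves both the -r and +r neighbor */
            for (r = 1; r <= R; r++)
                s += coef[r] * (A[t % 2][k - r] + A[t % 2][k + r]);
            A[(t + 1) % 2][k] = s;   /* write the other buffer */
        }
    }
    printf("A[%d][%d] = %f\n", NT % 2, N / 2, A[NT % 2][N / 2]);
    return 0;
}

Only the buffer parity ((t)%2 read, (t+1)%2 write) and the symmetric pairing A[k-r] + A[k+r] carry over from the 3D kernel; boundary handling here is simply "never update the halo".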
data.c
#include "data.h" #include "utils.h" #include "image.h" #include "dark_cuda.h" #include "box.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #define NUMCHARS 37 pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = random_gen()%m; indexes[i] = index; random_paths[i] = paths[index]; if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed) { int speed = rand_int(1, augment_speed); if (speed < 1) speed = 1; char** sequentia_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d, mini_batch = %d \n", n, mini_batch); unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int)); for (i = 0; i < mini_batch; ++i) { start_time_indexes[i] = random_gen() % m; //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]); } for (i = 0; i < n; ++i) { do { int time_line_index = i % mini_batch; unsigned int index = start_time_indexes[time_line_index] % m; start_time_indexes[time_line_index] += speed; //int index = random_gen() % m; sequentia_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf(" index = %u - grp: %s \n", index, paths[index]); if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]); } while (strlen(sequentia_paths[i]) == 0); } free(start_time_indexes); pthread_mutex_unlock(&mutex); return sequentia_paths; } char **get_random_paths(char **paths, int n, int m) { char** random_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d \n", n); for(i = 0; i < n; ++i){ do { int index = random_gen() % m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf("grp: %s\n", paths[index]); if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]); } while (strlen(random_paths[i]) == 0); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char** replace_paths = (char**)xcalloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float 
saturation, float exposure, int dontuse_opencv) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ int size = w > h ? w : h; image im; if(dontuse_opencv) im = load_image_stb_resize(paths[i], 0, 0, 3); else im = load_image_color(paths[i], 0, 0); image crop = random_augment_image(im, angle, aspect, min, max, size); int flip = use_flip ? random_gen() % 2 : 0; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); image sized = resize_image(crop, w, h); //show_image(im, "orig"); //show_image(sized, "sized"); //show_image(sized, paths[i]); //wait_until_press_key_cv(); //printf("w = %d, h = %d \n", sized.w, sized.h); free_image(im); free_image(crop); X.vals[i] = sized.data; X.cols = sized.h*sized.w*sized.c; } return X; } extern int check_mistakes; box_label *read_boxes(char *filename, int *n) { box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename); //file_error(filename); FILE* fw = fopen("bad.list", "a"); fwrite(filename, sizeof(char), strlen(filename), fw); char *new_line = "\n"; fwrite(new_line, sizeof(char), strlen(new_line), fw); fclose(fw); if (check_mistakes) getchar(); *n = 0; return boxes; } float x, y, h, w; int id; int count = 0; while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = random_gen()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 || (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. 
- swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 30; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .001 || h < .001) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; int i; box_label *boxes = read_boxes(labelpath, &count); int min_w_h = 0; float lowest_w = 1.F / net_w; float lowest_h = 1.F / net_h; randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; // not detect small objects //if ((w < 0.001F || h < 0.001F)) continue; // if truth (box for object) is smaller than 1x1 pix char buff[256]; if (id >= classes) { printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath); sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. 
But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1)); system(buff); getchar(); ++sub; continue; } if ((w < lowest_w || h < lowest_h)) { //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath); //system(buff); ++sub; continue; } if (x == 999999 || y == 999999) { printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (x <= 0 || x > 1 || y <= 0 || y > 1) { printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (w > 1) { printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath); sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w); system(buff); w = 1; if (check_mistakes) getchar(); } if (h > 1) { printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath); sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h); system(buff); h = 1; if (check_mistakes) getchar(); } if (x == 0) x += lowest_w; if (y == 0) y += lowest_h; truth[(i-sub)*5+0] = x; truth[(i-sub)*5+1] = y; truth[(i-sub)*5+2] = w; truth[(i-sub)*5+3] = h; truth[(i-sub)*5+4] = id; if (min_w_h == 0) min_w_h = w*net_w; if (min_w_h > w*net_w) min_w_h = w*net_w; if (min_w_h > h*net_h) min_w_h = h*net_h; } free(boxes); return min_w_h; } void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = max_index(pred+i*NUMCHARS, NUMCHARS); printf("%c", int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){ int index = alphanum_to_int(begin[i]); if(index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS+index] = 1; } for(;i < n; ++i){ truth[i*NUMCHARS + NUMCHARS-1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for(i = 0; i < n; ++i){ fill_truth_captcha(paths[i], k, d.y.vals[i]); } if(m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if(m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k*sizeof(float)); int count = 0; for(i = 0; i < k; ++i){ if(strstr(path, labels[i])){ truth[i] = 1; ++count; } } if (count != 1) { printf("Too many or too few labels: %d, %s\n", count, path); count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { printf("\t label %d: %s \n", count, labels[i]); count++; } } } } void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps) { int i; memset(truth, 0, k * sizeof(float)); int count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { truth[i] = (1 - label_smooth_eps); ++count; } else { truth[i] = label_smooth_eps / (k - 1); } } if (count != 1) { printf("Too many or too few labels: %d, %s\n", count, path); count = 0; 
for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { printf("\t label %d: %s \n", count, labels[i]); count++; } } } } void fill_hierarchy(float *truth, int k, tree *hierarchy) { int j; for(j = 0; j < k; ++j){ if(truth[j]){ int parent = hierarchy->parent[j]; while(parent >= 0){ truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for(j = 0; j < hierarchy->groups; ++j){ //printf("%d\n", count); int mask = 1; for(i = 0; i < hierarchy->group_size[j]; ++i){ if(truth[count + i]){ mask = 0; break; } } if (mask) { for(i = 0; i < hierarchy->group_size[j]; ++i){ truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth_smooth(paths[i], labels, k, y.vals[i], label_smooth_eps); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "imgs", "labels", label); find_replace(label, "_iconl.jpeg", ".txt", label); FILE *file = fopen(label, "r"); if(!file){ find_replace(label, "labels", "labels2", label); file = fopen(label, "r"); if(!file) continue; } ++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } printf("%d/%d\n", count, n); return y; } char **get_labels_custom(char *filename, int *size) { list *plist = get_paths(filename); if(size) *size = plist->size; char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } char **get_labels(char *filename) { return get_labels_custom(filename, NULL); } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = 
load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = random_gen()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = (4+classes)*30; d.y = make_matrix(1, k); int dw = w*jitter; int dh = h*jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/w)/sx; float dy = ((float)ptop /h)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); d.X.vals[0] = sized.data; fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); return d; } void blend_truth(float *new_truth, int boxes, float *old_truth) { const int t_size = 4 + 1; int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*(4 + 1)]; if (!x) break; count_new_truth++; } for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + t*t_size; float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size; float x = old_truth_ptr[0]; if (!x) break; new_truth_ptr[0] = old_truth_ptr[0]; new_truth_ptr[1] = old_truth_ptr[1]; new_truth_ptr[2] = old_truth_ptr[2]; new_truth_ptr[3] = old_truth_ptr[3]; new_truth_ptr[4] = old_truth_ptr[4]; } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } void blend_truth_mosaic(float *new_truth, int boxes, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup, int left_shift, int right_shift, int top_shift, int bot_shift) { const int t_size = 4 + 1; int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*(4 + 1)]; if (!x) break; count_new_truth++; } int new_t = count_new_truth; for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + new_t*t_size; new_truth_ptr[0] = 0; float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size; float x = old_truth_ptr[0]; if 
(!x) break; float xb = old_truth_ptr[0]; float yb = old_truth_ptr[1]; float wb = old_truth_ptr[2]; float hb = old_truth_ptr[3]; // shift 4 images if (i_mixup == 0) { xb = xb - (float)(w - cut_x - right_shift) / w; yb = yb - (float)(h - cut_y - bot_shift) / h; } if (i_mixup == 1) { xb = xb + (float)(cut_x - left_shift) / w; yb = yb - (float)(h - cut_y - bot_shift) / h; } if (i_mixup == 2) { xb = xb - (float)(w - cut_x - right_shift) / w; yb = yb + (float)(cut_y - top_shift) / h; } if (i_mixup == 3) { xb = xb + (float)(cut_x - left_shift) / w; yb = yb + (float)(cut_y - top_shift) / h; } int left = (xb - wb / 2)*w; int right = (xb + wb / 2)*w; int top = (yb - hb / 2)*h; int bot = (yb + hb / 2)*h; // fix out of bound if (left < 0) { float diff = (float)left / w; xb = xb - diff / 2; wb = wb + diff; } if (right > w) { float diff = (float)(right - w) / w; xb = xb - diff / 2; wb = wb - diff; } if (top < 0) { float diff = (float)top / h; yb = yb - diff / 2; hb = hb + diff; } if (bot > h) { float diff = (float)(bot - h) / h; yb = yb - diff / 2; hb = hb - diff; } left = (xb - wb / 2)*w; right = (xb + wb / 2)*w; top = (yb - hb / 2)*h; bot = (yb + hb / 2)*h; // leave only within the image if(left >= 0 && right <= w && top >= 0 && bot <= h && wb > 0 && wb < 1 && hb > 0 && hb < 1 && xb > 0 && xb < 1 && yb > 0 && yb < 1) { new_truth_ptr[0] = xb; new_truth_ptr[1] = yb; new_truth_ptr[2] = wb; new_truth_ptr[3] = hb; new_truth_ptr[4] = old_truth_ptr[4]; new_t++; } } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } #ifdef OPENCV #include "http_stream.h" data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs) { const int random_index = random_gen(); c = c ? 
c : 3; assert(use_mixup != 2); if (use_mixup == 3 && letter_box) { printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n"); exit(0); } if (random_gen() % 2 == 0) use_mixup = 0; int i; int *cut_x = NULL, *cut_y = NULL; if (use_mixup == 3) { cut_x = (int*)calloc(n, sizeof(int)); cut_y = (int*)calloc(n, sizeof(int)); const float min_offset = 0.2; // 20% for (i = 0; i < n; ++i) { cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset)); cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset)); } } data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0; int augmentation_calculated = 0; d.y = make_matrix(n, 5*boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1) char **random_paths; if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(5 * boxes, sizeof(float)); const char *filename = random_paths[i]; int flag = (c >= 3); mat_cv *src; src = load_image_mat_cv(filename, flag); if (src == NULL) { if (check_mistakes) getchar(); continue; } int oh = get_height_mat(src); int ow = get_width_mat(src); int dw = (ow*jitter); int dh = (oh*jitter); if (!augmentation_calculated || !track) { augmentation_calculated = 1; r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); flip = use_flip ? random_gen() % 2 : 0; if (use_blur) { int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image if (tmp_blur == 0) blur = 0; else if (tmp_blur == 1) blur = 1; else blur = use_blur; } } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); //float scale = rand_precalc_random(.25, 2, r_scale); // unused currently if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh)/2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow)/2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. 
/ sy, w, h); if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, blur, boxes, truth); if (use_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); } else if (use_mixup == 1) { if (i_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); } else if (i_mixup == 1) { image old_img = make_empty_image(w, h, c); old_img.data = d.X.vals[i]; //show_image(ai, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images_cv(ai, 0.5, old_img, 0.5); blend_truth(d.y.vals[i], boxes, truth); free_image(old_img); d.X.vals[i] = ai.data; } } else if (use_mixup == 3) { if (i_mixup == 0) { image tmp_img = make_image(w, h, c); d.X.vals[i] = tmp_img.data; } if (flip) { int tmp = pleft; pleft = pright; pright = tmp; } const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow))); const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh))); const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow))); const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh))); int k, x, y; for (k = 0; k < c; ++k) { for (y = 0; y < h; ++y) { int j = y*w + k*w*h; if (i_mixup == 0 && y < cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 1 && y < cut_y[i]) { int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float)); } if (i_mixup == 2 && y >= cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 3 && y >= cut_y[i]) { int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float)); } } } blend_truth_mosaic(d.y.vals[i], boxes, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift); free_image(ai); ai.data = d.X.vals[i]; } if (show_imgs && i_mixup == use_mixup) // delete i_mixup { image tmp_ai = copy_image(ai); char buff[1000]; //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*ai.w; int right = (b.x + b.w / 2.)*ai.w; int top = (b.y - b.h / 2.)*ai.h; int bot = (b.y + b.h / 2.)*ai.h; draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(tmp_ai, buff); if (show_imgs == 1) { //char buff_src[1000]; //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); //show_image_mat(src, buff_src); show_image(tmp_ai, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n"); free_image(tmp_ai); } release_mat(&src); free(truth); } if (random_paths) free(random_paths); } return d; } #else // OPENCV void blend_images(image new_img, float alpha, image old_img, float beta) { int data_size = new_img.w * new_img.h * new_img.c; int i; #pragma omp parallel for for (i = 0; i < data_size; ++i) new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta; } data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); assert(use_mixup < 2); int mixup = use_mixup ? random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale; float dhue = 0, dsat = 0, dexp = 0, flip = 0; int augmentation_calculated = 0; d.y = make_matrix(n, 5 * boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(5 * boxes, sizeof(float)); char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i]; image orig = load_image(filename, 0, 0, c); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); if (!augmentation_calculated || !track) { augmentation_calculated = 1; r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); flip = use_flip ? 
random_gen() % 2 : 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); float scale = rand_precalc_random(.25, 2, r_scale); // unused currently if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh) / 2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow) / 2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; image sized = resize_image(cropped, w, h); if (flip) flip_image(sized); distort_image(sized, dhue, dsat, dexp); //random_distort_image(sized, hue, saturation, exposure); fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); if (i_mixup) { image old_img = sized; old_img.data = d.X.vals[i]; //show_image(sized, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images(sized, 0.5, old_img, 0.5); blend_truth(truth, boxes, d.y.vals[i]); free_image(old_img); } d.X.vals[i] = sized.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); if (show_imgs)// && i_mixup) { char buff[1000]; sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*sized.w; int right = (b.x + b.w / 2.)*sized.w; int top = (b.y - b.h / 2.)*sized.h; int bot = (b.y + b.h / 2.)*sized.h; draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(sized, buff); if (show_imgs == 1) { show_image(sized, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Press Enter: \n"); //getchar(); } free_image(orig); free_image(cropped); free(truth); } } free(random_paths); if (mixup_random_paths) free(mixup_random_paths); return d; } #endif // OPENCV void *load_thread(void *ptr) { //srand(time(0)); //printf("Loading data: %d\n", random_gen()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = resize_image(*(a.im), a.w, a.h); }else if (a.type == LETTERBOX_DATA) { *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } void *load_threads(void *ptr) { //srand(time(0)); int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data* buffers = (data*)xcalloc(args.threads, sizeof(data)); pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); free(threads); return 0; } pthread_t load_data(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = 
load_image_paths(paths, n, w, h); d.y = load_image_paths_gray(replace_paths, n, out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)xcalloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)xcalloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = random_gen()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv) { char **paths_stored = paths; if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps); if (use_mixup && rand_int(0, 1)) { char **paths_mix = get_random_paths(paths_stored, n, m); data d2 = { 0 }; d2.shallow = 0; d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix); data d3 = { 0 }; d3.shallow = 0; data d4 = { 0 }; d4.shallow = 0; if (use_mixup >= 3) { char **paths_mix3 = get_random_paths(paths_stored, n, m); d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix3); char **paths_mix4 = get_random_paths(paths_stored, n, m); d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix4); } // mix int i, j; for (i = 0; i < d2.X.rows; ++i) { int mixup = use_mixup; if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic // MixUp ----------------------------------- if (mixup == 1) { // mix images for (j = 0; j < d2.X.cols; ++j) { d.X.vals[i][j] = (d.X.vals[i][j] + 
d2.X.vals[i][j]) / 2.0f; } // mix labels for (j = 0; j < d2.y.cols; ++j) { d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f; } } // CutMix ----------------------------------- else if (mixup == 2) { const float min = 0.3; // 0.3*0.3 = 9% const float max = 0.8; // 0.8*0.8 = 64% const int cut_w = rand_int(w*min, w*max); const int cut_h = rand_int(h*min, h*max); const int cut_x = rand_int(0, w - cut_w - 1); const int cut_y = rand_int(0, h - cut_h - 1); const int left = cut_x; const int right = cut_x + cut_w; const int top = cut_y; const int bot = cut_y + cut_h; assert(cut_x >= 0 && cut_x <= w); assert(cut_y >= 0 && cut_y <= h); assert(cut_w >= 0 && cut_w <= w); assert(cut_h >= 0 && cut_h <= h); assert(right >= 0 && right <= w); assert(bot >= 0 && bot <= h); assert(top <= bot); assert(left <= right); const float alpha = (float)(cut_w*cut_h) / (float)(w*h); const float beta = 1 - alpha; int c, x, y; for (c = 0; c < 3; ++c) { for (y = top; y < bot; ++y) { for (x = left; x < right; ++x) { int j = x + y*w + c*w*h; d.X.vals[i][j] = d2.X.vals[i][j]; } } } //printf("\n alpha = %f, beta = %f \n", alpha, beta); // mix labels for (j = 0; j < d.y.cols; ++j) { d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha; } } // Mosaic ----------------------------------- else if (mixup == 3) { const float min_offset = 0.2; // 20% const int cut_x = rand_int(w*min_offset, w*(1 - min_offset)); const int cut_y = rand_int(h*min_offset, h*(1 - min_offset)); float s1 = (float)(cut_x * cut_y) / (w*h); float s2 = (float)((w - cut_x) * cut_y) / (w*h); float s3 = (float)(cut_x * (h - cut_y)) / (w*h); float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h); int c, x, y; for (c = 0; c < 3; ++c) { for (y = 0; y < h; ++y) { for (x = 0; x < w; ++x) { int j = x + y*w + c*w*h; if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j]; if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j]; if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j]; if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j]; } } } for (j = 0; j < d.y.cols; ++j) { const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4))); d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s; } } } free_data(d2); if (use_mixup >= 3) { free_data(d3); free_data(d4); } } #ifdef OPENCV if (use_blur) { int i; for (i = 0; i < d.X.rows; ++i) { if (random_gen() % 2) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; int ksize = use_blur; if (use_blur == 1) ksize = 17; image blurred = blur_image(im, ksize); free_image(im); d.X.vals[i] = blurred.data; //if (i == 0) { // show_image(im, "Not blurred"); // show_image(blurred, "blurred"); // wait_until_press_key_cv(); //} } } } #endif // OPENCV if (show_imgs) { int i, j; for (i = 0; i < d.X.rows; ++i) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; char buff[1000]; sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen()); save_image(im, buff); char buff_string[1000]; sprintf(buff_string, "\n Classes: "); for (j = 0; j < d.y.cols; ++j) { if (d.y.vals[i][j] > 0) { char buff_tmp[100]; sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]); strcat(buff_string, buff_tmp); } } printf("%s \n", buff_string); if (show_imgs == 1) { show_image(im, buff); wait_until_press_key_cv(); } } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n"); } if (m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = w; d.h = h; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data newdata = concat_data(d[i], out); free_data(out); out = newdata; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)xcalloc(num, sizeof(float*)); r.y.vals = (float**)xcalloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)xcalloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train ={0}; data test ={0}; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*)); test.y.vals = 
(float**)xcalloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
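In load_data_augment above, the mixup == 1 branch blends a pair of samples 50/50 in both pixel space and label space. Below is a stripped-down sketch of just that blend; the function name, array sizes, and values are invented for the example, while darknet's real code does the same arithmetic over matrix rows d.X.vals[i] and d.y.vals[i].

#include <stdio.h>

static void mixup_pair(float *x1, const float *x2, int n_pix,
                       float *y1, const float *y2, int n_lab)
{
    int j;
    for (j = 0; j < n_pix; ++j) x1[j] = (x1[j] + x2[j]) / 2.0f; /* blend pixels */
    for (j = 0; j < n_lab; ++j) y1[j] = (y1[j] + y2[j]) / 2.0f; /* blend one-hot labels */
}

int main(void)
{
    float img_a[4] = {0.f, 0.f, 1.f, 1.f}, img_b[4] = {1.f, 1.f, 0.f, 0.f};
    float lab_a[2] = {1.f, 0.f},           lab_b[2] = {0.f, 1.f};

    mixup_pair(img_a, img_b, 4, lab_a, lab_b, 2);
    printf("blended label: %.2f %.2f\n", lab_a[0], lab_a[1]); /* prints 0.50 0.50 */
    return 0;
}

The CutMix branch (mixup == 2) replaces the fixed 50/50 weights with area-proportional ones, alpha = cut_w*cut_h / (w*h), and the Mosaic branch (mixup == 3) extends the same idea to four quadrant weights s1..s4.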
deconvolution_pack1to4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack1to4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack1ton, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { v4f32 _sum = (v4f32)__msa_fill_w(0); if (bias_data_ptr) { _sum = (v4f32)__msa_ld_w((const float*)bias_data_ptr + p * 4, 0); } const float* kptr = (const float*)weight_data_pack1ton + maxk * channels * p * 4; // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; const float* sptr = m.row(sy); for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; float val = sptr[sx]; int k = y * kernel_w + x; v4f32 _val = (v4f32)__msa_fill_w_f32(val); v4f32 _w = (v4f32)__msa_ld_w(kptr + k * 4, 0); _sum = __msa_fadd_w(_sum, __msa_fmul_w(_val, _w)); } } kptr += maxk * 4; } _sum = activation_ps(_sum, activation_type, activation_params); __msa_st_w((v4i32)_sum, outptr + j * 4, 0); } outptr += outw * 4; } } }
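deconvolution_pack1to4_msa iterates over outputs rather than inputs: for each output pixel it tests which input samples can reach it through kernel tap (x, y), via sys = i + y*dilation - (kernel_extent - 1) plus a stride-divisibility check. A scalar 1D sketch of that same index test, without the MSA packing, is below; the sizes and weights are made up, and the outw formula assumes zero padding.

#include <stdio.h>

int main(void)
{
    const float in[4] = {1, 2, 3, 4}, k[3] = {0.5f, 1.0f, 0.5f};
    const int w = 4, kernel_w = 3, stride = 2, dilation = 1;
    const int kernel_extent = dilation * (kernel_w - 1) + 1;
    const int outw = (w - 1) * stride + kernel_extent;  /* 9, assuming no padding */

    for (int i = 0; i < outw; i++) {
        float sum = 0.f;
        for (int y = 0; y < kernel_w; y++) {
            /* same test as the MSA kernel: which input sample reaches output i? */
            int sys = i + y * dilation - (kernel_extent - 1);
            if (sys < 0 || sys % stride != 0) continue;
            int sy = sys / stride;
            if (sy >= w) continue;
            sum += in[sy] * k[y];
        }
        printf("out[%d] = %.2f\n", i, sum);
    }
    return 0;
}

Walking the output this way turns transposed convolution into a gather, with no scatter stores; that is what lets the MSA version keep _sum in a register until the single __msa_st_w at the end.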
fc_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haoluo@openailab.com
 */

#include "fc_kernel_arm.h"

#include "api/c_api.h"
#include "utility/sys_port.h"

#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>

#ifdef __aarch64__
void sgemv_1x8_a72(float* biases, float* input, float* kernel, long kernel_size, float* output);
void sgemv_1x2_a72(float* biases, float* input, float* kernel, long kernel_size, float* output);
#else
void sgemv_1x8_a17(float* biases, float* input, float* kernel, int kernel_size, float* output);
void sgemv_1x2_a17(float* biases, float* input, float* kernel, int kernel_size, float* output);
#endif

typedef void (*kernel_t)(float* biases, float* input, float* kernel, int kernel_size, float* output);

static void sgemv1x8(float* input, float* output, float* kernel, float* bias, int kernel_size,
                     int start_ch, int end_ch, int num_thread, kernel_t kernel_1x8)
{
#pragma omp parallel for num_threads(num_thread)
    for (int ch = start_ch; ch < end_ch; ch += 8)
    {
        float* cur_kernel = kernel + ch * kernel_size;
        float* cur_output = output + ch;
        float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        float* cur_bias = bias ? bias + ch : zeros;
        kernel_1x8(cur_bias, input, cur_kernel, kernel_size, cur_output);
    }
}

static void sgemv1x2(float* input, float* output, float* kernel, float* bias, int kernel_size,
                     int start_ch, int end_ch, int num_thread, kernel_t kernel_1x2)
{
    int end_ch2 = end_ch & -2;

#pragma omp parallel for num_threads(num_thread)
    for (int ch = start_ch; ch < end_ch2; ch += 2)
    {
        float* cur_kernel = kernel + ch * kernel_size;
        float* cur_output = output + ch;
        float zeros[2] = {0.f, 0.f};
        float* cur_bias = bias ? bias + ch : zeros;
        kernel_1x2(cur_bias, input, cur_kernel, kernel_size, cur_output);
    }

    int ch = end_ch2;
    if (end_ch & 0x1)
    {
        float* cur_kernel = kernel + end_ch2 * kernel_size;
        float* cur_output = output + end_ch2;
        float sum = bias ? bias[ch] : 0.f;
        for (int i = 0; i < kernel_size; i++)
            sum += input[i] * cur_kernel[i];
        *cur_output = sum;
    }
}

static void interleave_kernel(const float* kernel, float* kernel_interleaved, int out_chan, int kernel_size)
{
    int i, j, k;
    float* cur_kernel[8];
    float* cur_kernel_interleaved;

    // interleave 8 kernel
    for (i = 0; i < (out_chan & -8); i += 8)
    {
        for (j = 0; j < 8; j++)
            cur_kernel[j] = (float*)kernel + kernel_size * (i + j);
        cur_kernel_interleaved = (float*)kernel_interleaved + kernel_size * i;
        for (k = 0; k < kernel_size; k++)
            for (j = 0; j < 8; j++)
                cur_kernel_interleaved[8 * k + j] = *(cur_kernel[j] + k);
    }

    // interleave 2 kernel
    for (; i < (out_chan & -2); i += 2)
    {
        for (j = 0; j < 2; j++)
            cur_kernel[j] = (float*)kernel + kernel_size * (i + j);
        cur_kernel_interleaved = (float*)kernel_interleaved + kernel_size * i;
        for (k = 0; k < kernel_size; k++)
            for (j = 0; j < 2; j++)
                cur_kernel_interleaved[2 * k + j] = *(cur_kernel[j] + k);
    }

    // copy last kernel
    if (out_chan & 0x1)
    {
        cur_kernel[0] = (float*)kernel + kernel_size * i;
        cur_kernel_interleaved = (float*)kernel_interleaved + kernel_size * i;
        for (k = 0; k < kernel_size; k++)
            cur_kernel_interleaved[k] = *(cur_kernel[0] + k);
    }
}

int fc_kernel_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
                     struct fc_priv_info* priv_info, struct fc_param* param)
{
    int num_output = param->num_output;
    int kernel_size = filter_tensor->dims[1];

    if (!priv_info->interleave_buffer)
    {
        int elemsize = input_tensor->elem_size;
        int mem_size = elemsize * num_output * kernel_size;
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    float* filter_data = (float*)filter_tensor->data;
    interleave_kernel(filter_data, (float*)priv_info->interleave_buffer, num_output, kernel_size);

    return 0;
}

int fc_kernel_postrun(struct fc_priv_info* priv_info)
{
    if (priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
        priv_info->interleave_buffer_size = 0;
    }
    if (priv_info->input_buffer != NULL)
    {
        sys_free(priv_info->input_buffer);
        priv_info->input_buffer = NULL;
        priv_info->input_buffer_size = 0;
    }

    return 0;
}

int fc_kernel_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
                  struct tensor* output_tensor, struct fc_priv_info* priv_info, struct fc_param* param,
                  int num_thread, int cpu_affinity)
{
    int out_num = param->num_output;
    int kernel_size = filter_tensor->dims[1];

    float* input = input_tensor->data;
    float* output = output_tensor->data;
    float* biases = NULL;
    if (bias_tensor)
        biases = bias_tensor->data;
    float* weight = priv_info->interleave_buffer;

    int remain_out_start = (out_num >> 3) << 3;

    /* set cpu affinity sgemv kernel */
    kernel_t kernel_1x8;
    kernel_t kernel_1x2;
#ifdef __aarch64__
    kernel_1x8 = (kernel_t)sgemv_1x8_a72;
    kernel_1x2 = (kernel_t)sgemv_1x2_a72;
#else
    kernel_1x8 = (kernel_t)sgemv_1x8_a17;
    kernel_1x2 = (kernel_t)sgemv_1x2_a17;
#endif

    /* process */
    for (int i = 0; i < input_tensor->dims[0]; i++)
    {
        float* cur_input = input + i * kernel_size;
        float* cur_output = output + i * out_num;
        sgemv1x8(cur_input, cur_output, weight, biases, kernel_size, 0, remain_out_start, num_thread, kernel_1x8);
        if (out_num & 0x7)
            sgemv1x2(cur_input, cur_output, weight, biases, kernel_size, remain_out_start, out_num, num_thread, kernel_1x2);
    }

    return 0;
}
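/* Plain-C reference for the 1x8 micro-kernel contract assumed above (a sketch,
 * not the tuned assembly): interleave_kernel() lays the weights out 8-way
 * interleaved, so step k of output channel j lives at kernel[8 * k + j]. */
static void sgemv_1x8_ref(float* biases, float* input, float* kernel,
                          int kernel_size, float* output)
{
    for (int j = 0; j < 8; j++)
        output[j] = biases[j];
    for (int k = 0; k < kernel_size; k++)
        for (int j = 0; j < 8; j++)
            output[j] += input[k] * kernel[8 * k + j];
}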
DeclOpenMP.h
//===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file defines OpenMP nodes for declarative directives.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_AST_DECLOPENMP_H
#define LLVM_CLANG_AST_DECLOPENMP_H

#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"

namespace clang {

/// \brief This represents '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
///   static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
class OMPThreadPrivateDecl final
    : public Decl,
      private llvm::TrailingObjects<OMPThreadPrivateDecl, Expr *> {
  friend class ASTDeclReader;
  friend TrailingObjects;

  unsigned NumVars;

  virtual void anchor();

  OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L) :
    Decl(DK, DC, L), NumVars(0) { }

  ArrayRef<const Expr *> getVars() const {
    return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
  }

  MutableArrayRef<Expr *> getVars() {
    return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
  }

  void setVars(ArrayRef<Expr *> VL);

public:
  static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
                                      SourceLocation L,
                                      ArrayRef<Expr *> VL);
  static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
                                                  unsigned ID, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  typedef llvm::iterator_range<varlist_iterator> varlist_range;
  typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }
  varlist_iterator varlist_begin() { return getVars().begin(); }
  varlist_iterator varlist_end() { return getVars().end(); }
  varlist_const_iterator varlist_begin() const { return getVars().begin(); }
  varlist_const_iterator varlist_end() const { return getVars().end(); }

  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};

/// \brief This represents '#pragma omp declare reduction ...' directive.
/// For example, in the following, declared reduction 'foo' for types 'int' and
/// 'float':
///
/// \code
/// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \
///                     initializer (omp_priv = 0)
/// \endcode
///
/// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer.
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
private:
  friend class ASTDeclReader;
  /// \brief Combiner for declare reduction construct.
  Expr *Combiner;
  /// \brief Initializer for declare reduction construct.
  Expr *Initializer;
  /// \brief Reference to the previous declare reduction construct in the same
  /// scope with the same name. Required for proper templates instantiation if
  /// the declare reduction construct is declared inside compound statement.
  LazyDeclPtr PrevDeclInScope;

  virtual void anchor();

  OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
                          DeclarationName Name, QualType Ty,
                          OMPDeclareReductionDecl *PrevDeclInScope)
      : ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), Combiner(nullptr),
        Initializer(nullptr), PrevDeclInScope(PrevDeclInScope) {}

  void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) {
    PrevDeclInScope = Prev;
  }

public:
  /// \brief Create declare reduction node.
  static OMPDeclareReductionDecl *
  Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name,
         QualType T, OMPDeclareReductionDecl *PrevDeclInScope);
  /// \brief Create deserialized declare reduction node.
  static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C,
                                                     unsigned ID);

  /// \brief Get combiner expression of the declare reduction construct.
  Expr *getCombiner() { return Combiner; }
  const Expr *getCombiner() const { return Combiner; }
  /// \brief Set combiner expression for the declare reduction construct.
  void setCombiner(Expr *E) { Combiner = E; }

  /// \brief Get initializer expression (if specified) of the declare reduction
  /// construct.
  Expr *getInitializer() { return Initializer; }
  const Expr *getInitializer() const { return Initializer; }
  /// \brief Set initializer expression for the declare reduction construct.
  void setInitializer(Expr *E) { Initializer = E; }

  /// \brief Get reference to previous declare reduction construct in the same
  /// scope with the same name.
  OMPDeclareReductionDecl *getPrevDeclInScope();
  const OMPDeclareReductionDecl *getPrevDeclInScope() const;

  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPDeclareReduction; }
  static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) {
    return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D));
  }
  static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) {
    return static_cast<OMPDeclareReductionDecl *>(
        const_cast<DeclContext *>(DC));
  }
};

/// Pseudo declaration for capturing expressions. Also is used for capturing of
/// non-static data members in non-static member functions.
///
/// Clang supports capturing of variables only, but OpenMP 4.5 allows to
/// privatize non-static members of current class in non-static member
/// functions. This pseudo-declaration allows properly handle this kind of
/// capture by wrapping captured expression into a variable-like declaration.
class OMPCapturedExprDecl final : public VarDecl {
  friend class ASTDeclReader;
  void anchor() override;

  OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id,
                      QualType Type)
      : VarDecl(OMPCapturedExpr, C, DC, SourceLocation(), SourceLocation(), Id,
                Type, nullptr, SC_None) {
    setImplicit();
  }

public:
  static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC,
                                     IdentifierInfo *Id, QualType T);

  static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID);

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPCapturedExpr; }
};

} // end namespace clang

#endif
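// Illustrative OpenMP source (not part of this header) showing the directives
// these AST nodes model: the 'threadprivate' below is represented by an
// OMPThreadPrivateDecl and the 'declare reduction' by an OMPDeclareReductionDecl.
#include <omp.h>

int counter = 0;
#pragma omp threadprivate(counter)

// 'omp_out += omp_in' is the combiner, 'omp_priv = 0' the initializer.
#pragma omp declare reduction(mysum : int : omp_out += omp_in) \
    initializer(omp_priv = 0)

int sum_squares(int n) {
  int s = 0;
#pragma omp parallel for reduction(mysum : s)
  for (int i = 0; i < n; i++)
    s += i * i;
  return s;
}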
parfor.c
#include <omp.h>
#include <stdio.h>
#include <math.h>

int main() {
  int N = 4;
  float b[N];
  float a[N];

  // serial portion of the code
  for (int j=0; j<N; j++) {
    b[j] = j+1;
  }

  // parallel portion of the code
#pragma omp parallel
  {
    float a[N]; // if declared here, it will go out of scope after threads join. You will fix this in lab 6
#pragma omp for
    for(int i=0; i<N; i++){
      printf("Thread %d working on component %d\n",omp_get_thread_num(),i);
      a[i] = sqrt(b[i]);
    }
  }

  // serial portion of the code
  for (int j=0; j<N; j++) {
    printf("square root of %f is = %f\n",b[j],a[j]);
  }

  return 0;
}
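/* One way the scoping issue flagged above can be resolved (a sketch, not the
 * official lab 6 solution): declare 'a' only in the enclosing scope so it is
 * shared and survives the parallel region; each iteration writes a distinct
 * element, so there is no race. */
#include <omp.h>
#include <stdio.h>
#include <math.h>

int main() {
  int N = 4;
  float b[N];
  float a[N]; // shared by all threads; still in scope after the join

  for (int j=0; j<N; j++) b[j] = j+1;

#pragma omp parallel for
  for (int i=0; i<N; i++) {
    printf("Thread %d working on component %d\n", omp_get_thread_num(), i);
    a[i] = sqrt(b[i]);
  }

  for (int j=0; j<N; j++) printf("square root of %f is = %f\n", b[j], a[j]);
  return 0;
}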
nlcg.c
#include "completion.h" #include "gradient.h" #include "../csf.h" #include "../util.h" #include "../thd_info.h" #include <math.h> /****************************************************************************** * PRIVATE FUNCTIONS *****************************************************************************/ static void p_update_direction( tc_model const * const model, val_t const beta, val_t * * gradients, val_t * * directions) { /* Restart, just go for steepest descent (negative gradient). * NOTE: we don't just run the normal code (multiplying by beta=0) because * the first iteration doesn't initialize directions. */ if(beta == 0) { #pragma omp parallel { for(idx_t m=0; m < model->nmodes; ++m) { idx_t const N = model->dims[m] * model->rank; val_t * const restrict dir = directions[m]; val_t const * const restrict grad = gradients[m]; #pragma omp for schedule(static) nowait for(idx_t x=0; x < N; ++x) { dir[x] = -grad[x]; } } } /* omp parallel */ return; } #pragma omp parallel { for(idx_t m=0; m < model->nmodes; ++m) { idx_t const N = model->dims[m] * model->rank; val_t * const restrict dir = directions[m]; val_t const * const restrict grad = gradients[m]; #pragma omp for schedule(static) nowait for(idx_t x=0; x < N; ++x) { /* p_{k+1} \gets - gradient + \Beta p_{k} */ dir[x] = -grad[x] + (beta * dir[x]); } } } /* omp parallel */ } /** * @brief Choose \beta using the Polak-Ribiere method with automatic restarts. * * @param model The current model. Just used for dimensionality. * @param prev_gradients The gradient of the previous iteration. * @param gradients The latest gradient. * * @return Beta, used to scale the previous direction when choosing the new * one. When a restart is needed, beta=0 and we take a steepest descent * step. */ static val_t p_conjugate_prplus( tc_model const * const model, val_t * * prev_gradients, val_t * * gradients) { val_t numer = 0; /* numerator is grad^T (grad - prev_grad) */ val_t denom = 0; /* denominator is || prev_grad ||^2 */ #pragma omp parallel reduction(+:numer, denom) { for(idx_t m=0; m < model->nmodes; ++m) { idx_t const N = model->dims[m] * model->rank; val_t const * const restrict grad = gradients[m]; val_t const * const restrict pgrad = prev_gradients[m]; #pragma omp for schedule(static) nowait for(idx_t x=0; x < N; ++x) { numer += grad[x] * (grad[x] - pgrad[x]); denom += pgrad[x] * pgrad[x]; } } } /* omp parallel */ /* beta <= 0 -> restart and use steepest descent */ val_t const beta = numer / denom; return SS_MAX(0, beta); } /** * @brief Choose \beta using the Fletcher-Reeves method. * * @param model The current model. Just used for dimensionality. * @param prev_gradients The gradient of the previous iteration. * @param gradients The latest gradient. * * @return Beta, used to scale the previous direction when choosing the new * one. */ static val_t p_conjugate_fr( tc_model const * const model, val_t * * prev_gradients, val_t * * gradients) { val_t numer = 0; /* numerator is || grad ||^2 */ val_t denom = 0; /* denominator is || prev_grad ||^2 */ #pragma omp parallel reduction(+:numer, denom) { for(idx_t m=0; m < model->nmodes; ++m) { idx_t const N = model->dims[m] * model->rank; val_t const * const restrict grad = gradients[m]; val_t const * const restrict pgrad = prev_gradients[m]; #pragma omp for schedule(static) nowait for(idx_t x=0; x < N; ++x) { numer += grad[x] * grad[x]; denom += pgrad[x] * pgrad[x]; } } } /* omp parallel */ return numer / denom; } /** * @brief Swap the previous and current gradients. The pointers are swapped. 
* * @param nmodes The number of gradients. * @param[out] prev_grads The gradients of the previous iterations. * @param[out] grads The latest gradients. */ static void p_swap_gradients( idx_t const nmodes, val_t * * prev_grads, val_t * * grads) { for(idx_t m=0; m < nmodes; ++m) { val_t * tmp = prev_grads[m]; prev_grads[m] = grads[m]; grads[m] = tmp; } } /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ void splatt_tc_nlcg( sptensor_t * train, sptensor_t const * const validate, tc_model * const model, tc_ws * const ws) { /* convert training data to CSF-ALLMODE */ double * opts = splatt_default_opts(); opts[SPLATT_OPTION_CSF_ALLOC] = SPLATT_CSF_ONEMODE; opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE; splatt_csf * csf = csf_alloc(train, opts); assert(csf->ntiles == 1); idx_t const nmodes = train->nmodes; /* allocate gradients */ val_t * directions[MAX_NMODES]; val_t * gradients[MAX_NMODES]; val_t * prev_gradients[MAX_NMODES]; for(idx_t m=0; m < nmodes; ++m) { size_t const bytes = model->dims[m] * model->rank * sizeof(**gradients); directions[m] = splatt_malloc(bytes); gradients[m] = splatt_malloc(bytes); prev_gradients[m] = splatt_malloc(bytes); } val_t loss = tc_loss_sq(train, model, ws); val_t frobsq = tc_frob_sq(model, ws); val_t prev_obj = loss + frobsq; tc_converge(train, validate, model, loss, frobsq, 0, ws); timer_start(&ws->tc_time); /* initialize direction with negative gradient */ tc_gradient(csf, model, ws, gradients); p_update_direction(model, 0, gradients, directions); /* foreach epoch */ for(idx_t e=1; e < ws->max_its+1; ++e) { /* find the new iterate */ tc_line_search(train, model, ws, prev_obj, gradients, directions, &loss, &frobsq); prev_obj = loss + frobsq; /* check for convergence */ if(tc_converge(train, validate, model, loss, frobsq, e, ws)) { break; } /* not converged, find next direction */ p_swap_gradients(nmodes, prev_gradients, gradients); tc_gradient(csf, model, ws, gradients); val_t const beta = p_conjugate_prplus(model, prev_gradients, gradients); p_update_direction(model, beta, gradients, directions); printf(" time-grad: %0.3fs time-line: %0.3fs\n", ws->grad_time.seconds, ws->line_time.seconds); } for(idx_t m=0; m < nmodes; ++m) { splatt_free(directions[m]); splatt_free(gradients[m]); splatt_free(prev_gradients[m]); } csf_free(csf, opts); splatt_free_opts(opts); }
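/* Flat-array sketch of the PR+ rule used by p_conjugate_prplus() above:
 *   beta = max(0, g_k^T (g_k - g_{k-1}) / || g_{k-1} ||^2)
 * The clamp at zero restarts with steepest descent whenever the conjugacy
 * estimate turns negative. Plain doubles here instead of val_t. */
static double prplus_beta(
    double const * grad,
    double const * prev_grad,
    size_t n)
{
  double numer = 0.; /* g_k^T (g_k - g_{k-1}) */
  double denom = 0.; /* || g_{k-1} ||^2 */
  for(size_t i = 0; i < n; ++i) {
    numer += grad[i] * (grad[i] - prev_grad[i]);
    denom += prev_grad[i] * prev_grad[i];
  }
  double const beta = numer / denom;
  return beta > 0. ? beta : 0.;
}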
variational_distance_calculation_process.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Ruben Zorrilla
//

#if !defined(KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED )
#define KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED

// System includes
#include <string>
#include <iostream>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "containers/model.h"
#include "includes/kratos_flags.h"
#include "elements/distance_calculation_element_simplex.h"
#include "linear_solvers/linear_solver.h"
#include "processes/process.h"
#include "modeler/connectivity_preserve_modeler.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "utilities/variable_utils.h"

namespace Kratos
{
///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name  Enum's
///@{

///@}
///@name  Functions
///@{

///@}
///@name Kratos Classes
///@{

/// Short class definition.
/** Takes a model part full of SIMPLICIAL ELEMENTS (triangles and tetras) and recomputes a signed
distance function, maintaining as much as possible the position of the zero of the function prior
to the call.

This is achieved by minimizing the function (1 - norm(gradient(distance)))**2
with the restriction that "distance" is a finite element function.
*/
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
class VariationalDistanceCalculationProcess : public Process
{
public:

    KRATOS_DEFINE_LOCAL_FLAG(PERFORM_STEP1);
    KRATOS_DEFINE_LOCAL_FLAG(DO_EXPENSIVE_CHECKS);
    KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE);

    ///@name Type Definitions
    ///@{

    typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
    typedef typename SchemeType::Pointer SchemePointerType;
    typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverPointerType;
    typedef ImplicitSolvingStrategy< TSparseSpace, TDenseSpace, TLinearSolver > SolvingStrategyType;

    ///@}
    ///@name Pointer Definitions
    /// Pointer definition of VariationalDistanceCalculationProcess
    KRATOS_CLASS_POINTER_DEFINITION(VariationalDistanceCalculationProcess);

    ///@}
    ///@name Life Cycle
    ///@{

    /** This process recomputes the distance function maintaining the zero of the existing distance
     * distribution. For this reason the DISTANCE should be initialized to values distinct from zero
     * in at least some portions of the domain; alternatively, the DISTANCE shall be fixed to zero on
     * at least some nodes, and the process will compute a positive distance respecting that zero.
     * @param rBaseModelPart - the model part on the top of which the calculation will be performed
     * @param pLinearSolver - linear solver to be used internally
     * @param MaxIterations - maximum number of iterations to be employed in the nonlinear
     *                        optimization process. Can also be set to 0 if a (very) rough
     *                        approximation is enough.
     *
     * EXAMPLE OF USAGE FROM PYTHON:
     *
         class distance_linear_solver_settings:
             solver_type = "AMGCL"
             tolerance = 1E-3
             max_iteration = 200
             scaling = False
             krylov_type = "CG"
             smoother_type = "SPAI0"
             verbosity = 0

         import linear_solver_factory
         distance_linear_solver = linear_solver_factory.ConstructSolver(distance_linear_solver_settings)

         max_iterations=1
         distance_calculator = VariationalDistanceCalculationProcess2D(
             fluid_model_part,
             distance_linear_solver,
             max_iterations)
         distance_calculator.Execute()
     */
    VariationalDistanceCalculationProcess(
        ModelPart& rBaseModelPart,
        typename TLinearSolver::Pointer pLinearSolver,
        unsigned int MaxIterations = 10,
        Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
        std::string AuxPartName = "RedistanceCalculationPart",
        double Coefficient1 = 0.01,
        double Coefficient2 = 0.1)
    :
        mDistancePartIsInitialized(false),
        mMaxIterations(MaxIterations),
        mrModel( rBaseModelPart.GetModel() ),
        mrBaseModelPart (rBaseModelPart),
        mOptions( Options ),
        mAuxModelPartName( AuxPartName ),
        mCoefficient1(Coefficient1),
        mCoefficient2(Coefficient2)
    {
        KRATOS_TRY

        ValidateInput();

        // Generate an auxiliary model part and populate it by elements of type DistanceCalculationElementSimplex
        ReGenerateDistanceModelPart(rBaseModelPart);

        auto p_builder_solver = Kratos::make_shared<ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> >(pLinearSolver);
        InitializeSolutionStrategy(p_builder_solver);

        KRATOS_CATCH("")
    }

    /// Constructor with custom Builder And Solver
    /** To be used in the trilinos version, since the trilinos builder and
     * solver needs additional data (the EpetraComm).
     * @param rBaseModelPart Reference ModelPart for distance calculation.
     * @param pLinearSolver Linear solver for the distance system.
     * @param MaxIterations Maximum number of non-linear optimization iterations.
     * @param Options Configuration flags for the procedure.
     * @param AuxPartName Name to be used for the internal distance calculation ModelPart.
     */
    VariationalDistanceCalculationProcess(
        ModelPart& rBaseModelPart,
        typename TLinearSolver::Pointer pLinearSolver,
        BuilderSolverPointerType pBuilderAndSolver,
        unsigned int MaxIterations = 10,
        Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
        std::string AuxPartName = "RedistanceCalculationPart",
        double Coefficient1 = 0.01,
        double Coefficient2 = 0.1)
    :
        mDistancePartIsInitialized(false),
        mMaxIterations(MaxIterations),
        mrModel( rBaseModelPart.GetModel() ),
        mrBaseModelPart (rBaseModelPart),
        mOptions( Options ),
        mAuxModelPartName( AuxPartName ),
        mCoefficient1(Coefficient1),
        mCoefficient2(Coefficient2)
    {
        KRATOS_TRY

        ValidateInput();

        // Generate an auxiliary model part and populate it by elements of type DistanceCalculationElementSimplex
        ReGenerateDistanceModelPart(rBaseModelPart);

        InitializeSolutionStrategy(pBuilderAndSolver);

        KRATOS_CATCH("")
    }

    /// Destructor.
    ~VariationalDistanceCalculationProcess() override
    {
        Clear();
    };

    ///@}
    ///@name Operators
    ///@{

    void operator()()
    {
        Execute();
    }

    ///@}
    ///@name Operations
    ///@{

    void Execute() override
    {
        KRATOS_TRY;

        if(mDistancePartIsInitialized == false){
            ReGenerateDistanceModelPart(mrBaseModelPart);
        }

        ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );

        // TODO: check flag PERFORM_STEP1
        // Step1 - solve a poisson problem with a source term which depends on the sign of the existing distance function
        r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,1);

        // Unfix the distances
        const int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());

        block_for_each(r_distance_model_part.Nodes(), [](Node<3>& rNode){
            double& d = rNode.FastGetSolutionStepValue(DISTANCE);
            double& fix_flag = rNode.FastGetSolutionStepValue(FLAG_VARIABLE);

            // Free the DISTANCE values
            fix_flag = 1.0;
            rNode.Free(DISTANCE);

            // Save the distances
            rNode.SetValue(DISTANCE, d);

            if(d == 0){
                d = 1.0e-15;
                fix_flag = -1.0;
                rNode.Fix(DISTANCE);
            } else {
                if(d > 0.0){
                    d = 1.0e15; // Set to a large number, to make sure that the minimal distance is computed according to CalculateTetrahedraDistances
                } else {
                    d = -1.0e15;
                }
            }
        });

        block_for_each(r_distance_model_part.Elements(), [this](Element& rElem){
            array_1d<double,TDim+1> distances;
            auto& geom = rElem.GetGeometry();

            for(unsigned int i=0; i<TDim+1; i++){
                distances[i] = geom[i].GetValue(DISTANCE);
            }

            const array_1d<double,TDim+1> original_distances = distances;

            // The element is cut by the interface
            if(this->IsSplit(distances)){
                // Compute the unsigned distance using GeometryUtils
                if (mOptions.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE)) {
                    GeometryUtils::CalculateExactDistancesToPlane(geom, distances);
                } else {
                    if(TDim==3){
                        GeometryUtils::CalculateTetrahedraDistances(geom, distances);
                    } else {
                        GeometryUtils::CalculateTriangleDistances(geom, distances);
                    }
                }

                // Assign the sign using the original distance values
                for(unsigned int i = 0; i < TDim+1; ++i){
                    if(original_distances[i] < 0){
                        distances[i] = -distances[i];
                    }
                }

                for(unsigned int i = 0; i < TDim+1; ++i){
                    double &d = geom[i].FastGetSolutionStepValue(DISTANCE);
                    double &fix_flag = geom[i].FastGetSolutionStepValue(FLAG_VARIABLE);
                    geom[i].SetLock();
                    if(std::abs(d) > std::abs(distances[i])){
                        d = distances[i];
                    }
                    fix_flag = -1.0;
                    geom[i].Fix(DISTANCE);
                    geom[i].UnSetLock();
                }
            }
        });

        // The nodal DISTANCE values were modified from the elements above, so the
        // fixity and distance databases must be synchronized before continuing
        this->SynchronizeFixity();
        this->SynchronizeDistance();

        // Compute the maximum and minimum distance for the fixed nodes
        double max_dist = 0.0;
        double min_dist = 0.0;
        for(int i_node = 0; i_node < nnodes; ++i_node){
            auto it_node = r_distance_model_part.NodesBegin() + i_node;
            if(it_node->IsFixed(DISTANCE)){
                const double& d = it_node->FastGetSolutionStepValue(DISTANCE);
                if(d > max_dist){
                    max_dist = d;
                }
                if(d < min_dist){
                    min_dist = d;
                }
            }
        }

        // Synchronize the maximum and minimum distance values
        const auto &r_communicator = r_distance_model_part.GetCommunicator().GetDataCommunicator();
        max_dist = r_communicator.MaxAll(max_dist);
        min_dist = r_communicator.MinAll(min_dist);

        // Assign the max dist to all of the non-fixed positive nodes
        // and the minimum one to the non-fixed negatives
        block_for_each(r_distance_model_part.Nodes(), [&min_dist, &max_dist](Node<3>& rNode){
            if(!rNode.IsFixed(DISTANCE)){
                double& d = rNode.FastGetSolutionStepValue(DISTANCE);
                if(d>0){
                    d = max_dist;
                } else {
                    d = min_dist;
                }
            }
        });

        mpSolvingStrategy->Solve();

        // Step2 - minimize the target residual
        r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,2);
        for(unsigned int it = 0; it<mMaxIterations; it++){
            mpSolvingStrategy->Solve();
        }

        // Unfix the distances
        VariableUtils().ApplyFixity(DISTANCE, false, r_distance_model_part.Nodes());

        KRATOS_CATCH("")
    }

    void Clear() override
    {
        if(mrModel.HasModelPart( mAuxModelPartName ))
            mrModel.DeleteModelPart( mAuxModelPartName );
        mDistancePartIsInitialized = false;

        mpSolvingStrategy->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "VariationalDistanceCalculationProcess";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << "VariationalDistanceCalculationProcess";
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    bool mDistancePartIsInitialized;
    unsigned int mMaxIterations;

    Model& mrModel;
    ModelPart& mrBaseModelPart;
    Flags mOptions;
    std::string mAuxModelPartName;

    double mCoefficient1;
    double mCoefficient2;

    typename SolvingStrategyType::UniquePointer mpSolvingStrategy;

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    void ValidateInput()
    {
        const DataCommunicator& r_comm =
            mrBaseModelPart.GetCommunicator().GetDataCommunicator();
        int num_elements = mrBaseModelPart.NumberOfElements();
        int num_nodes = mrBaseModelPart.NumberOfNodes();

        if (num_elements > 0)
        {
            const auto geometry_family = mrBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily();
            KRATOS_ERROR_IF( (TDim == 2) && (geometry_family != GeometryData::KratosGeometryFamily::Kratos_Triangle) )
                << "In 2D the element type is expected to be a triangle." << std::endl;
            KRATOS_ERROR_IF( (TDim == 3) && (geometry_family != GeometryData::KratosGeometryFamily::Kratos_Tetrahedra) )
                << "In 3D the element type is expected to be a tetrahedron" << std::endl;
        }

        KRATOS_ERROR_IF(r_comm.SumAll(num_nodes) == 0) << "The model part has no nodes." << std::endl;
        KRATOS_ERROR_IF(r_comm.SumAll(num_elements) == 0) << "The model part has no elements." << std::endl;

        // Check that required nodal variables are present
        VariableUtils().CheckVariableExists<Variable<double > >(DISTANCE, mrBaseModelPart.Nodes());
        VariableUtils().CheckVariableExists<Variable<double > >(FLAG_VARIABLE, mrBaseModelPart.Nodes());
    }

    void InitializeSolutionStrategy(BuilderSolverPointerType pBuilderAndSolver)
    {
        // Generate a linear strategy
        auto p_scheme = Kratos::make_shared< ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace,TDenseSpace > >();
        ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );

        bool CalculateReactions = false;
        bool ReformDofAtEachIteration = false;
        bool CalculateNormDxFlag = false;

        mpSolvingStrategy = Kratos::make_unique<ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver> >(
            r_distance_model_part,
            p_scheme,
            pBuilderAndSolver,
            CalculateReactions,
            ReformDofAtEachIteration,
            CalculateNormDxFlag);

        // TODO: check flag DO_EXPENSIVE_CHECKS
        mpSolvingStrategy->Check();
    }

    virtual void ReGenerateDistanceModelPart(ModelPart& rBaseModelPart)
    {
        KRATOS_TRY

        if(mrModel.HasModelPart( mAuxModelPartName ))
            mrModel.DeleteModelPart( mAuxModelPartName );

        // Ensure that the nodes have distance as a DOF
        VariableUtils().AddDof<Variable<double> >(DISTANCE, rBaseModelPart);

        // Generate
        ModelPart& r_distance_model_part = mrModel.CreateModelPart( mAuxModelPartName );

        Element::Pointer p_distance_element = Kratos::make_intrusive<DistanceCalculationElementSimplex<TDim> >();

        r_distance_model_part.GetNodalSolutionStepVariablesList() = rBaseModelPart.GetNodalSolutionStepVariablesList();

        ConnectivityPreserveModeler modeler;
        modeler.GenerateModelPart(rBaseModelPart, r_distance_model_part, *p_distance_element);

        // Using the conditions to mark the boundary with the flag boundary
        // Note that we DO NOT add the conditions to the model part
        VariableUtils().SetFlag<ModelPart::NodesContainerType>(BOUNDARY, false, r_distance_model_part.Nodes());
        // Note that above we have assigned the same geometry. Thus the flag is
        // set in the distance model part even though we are iterating the base one
        for (auto it_cond = rBaseModelPart.ConditionsBegin(); it_cond != rBaseModelPart.ConditionsEnd(); ++it_cond){
            Geometry< Node<3> >& geom = it_cond->GetGeometry();
            for(unsigned int i=0; i<geom.size(); i++){
                geom[i].Set(BOUNDARY,true);
            }
        }

        rBaseModelPart.GetCommunicator().SynchronizeOrNodalFlags(BOUNDARY);

        r_distance_model_part.GetProcessInfo().SetValue(VARIATIONAL_REDISTANCE_COEFFICIENT_FIRST, mCoefficient1);
        r_distance_model_part.GetProcessInfo().SetValue(VARIATIONAL_REDISTANCE_COEFFICIENT_SECOND, mCoefficient2);

        mDistancePartIsInitialized = true;

        KRATOS_CATCH("")
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    bool IsSplit(const array_1d<double,TDim+1> &rDistances){
        unsigned int positives = 0, negatives = 0;

        for(unsigned int i = 0; i < TDim+1; ++i){
            if(rDistances[i] >= 0){
                ++positives;
            } else {
                ++negatives;
            }
        }

        if (positives > 0 && negatives > 0){
            return true;
        }

        return false;
    }

    void SynchronizeDistance(){
        ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
        auto &r_communicator = r_distance_model_part.GetCommunicator();

        // Only required in the MPI case
        if(r_communicator.TotalProcesses() != 1){
            int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());

            // Set the distance absolute value
            #pragma omp parallel for
            for(int i_node = 0; i_node < nnodes; ++i_node){
                auto it_node = r_distance_model_part.NodesBegin() + i_node;
                it_node->FastGetSolutionStepValue(DISTANCE) = std::abs(it_node->FastGetSolutionStepValue(DISTANCE));
            }

            // Synchronize the unsigned value to minimum
            r_communicator.SynchronizeCurrentDataToMin(DISTANCE);

            // Set the distance sign again by retrieving it from the non-historical database
            #pragma omp parallel for
            for(int i_node = 0; i_node < nnodes; ++i_node){
                auto it_node = r_distance_model_part.NodesBegin() + i_node;
                if(it_node->GetValue(DISTANCE) < 0.0){
                    it_node->FastGetSolutionStepValue(DISTANCE) = -it_node->FastGetSolutionStepValue(DISTANCE);
                }
            }
        }
    }

    void SynchronizeFixity(){
        ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
        auto &r_communicator = r_distance_model_part.GetCommunicator();

        // Only required in the MPI case
        if(r_communicator.TotalProcesses() != 1){
            int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());

            // Synchronize the fixity flag variable to minimum
            // (-1.0 means fixed and 1.0 means free)
            r_communicator.SynchronizeCurrentDataToMin(FLAG_VARIABLE);

            // Set the fixity according to the synchronized flag
            #pragma omp parallel for
            for(int i_node = 0; i_node < nnodes; ++i_node){
                auto it_node = r_distance_model_part.NodesBegin() + i_node;
                const double &r_fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);
                if (r_fix_flag == -1.0){
                    it_node->Fix(DISTANCE);
                }
            }
        }
    }

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    VariationalDistanceCalculationProcess& operator=(VariationalDistanceCalculationProcess const& rOther);

    /// Copy constructor.
    //VariationalDistanceCalculationProcess(VariationalDistanceCalculationProcess const& rOther);

    ///@}
}; // Class VariationalDistanceCalculationProcess

// Avoiding using the macro since this has a template parameter. If there were
// no template parameter, please use the KRATOS_CREATE_LOCAL_FLAG macro
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::PERFORM_STEP1(Kratos::Flags::Create(0));
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::DO_EXPENSIVE_CHECKS(Kratos::Flags::Create(1));
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(2));

///@}
///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::istream& operator >> (std::istream& rIStream,
                                  VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis);

/// output stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::ostream& operator << (std::ostream& rOStream,
                                  const VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}

} // namespace Kratos.

#endif // KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED defined
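// Hypothetical C++ usage sketch mirroring the Python example in the header
// above. The space and solver type names below (UblasSpace,
// SkylineLUFactorizationSolver) are assumptions taken from the wider Kratos
// code base, not declarations made in this file.
#include "spaces/ublas_space.h"
#include "linear_solvers/skyline_lu_factorization_solver.h"

using SparseSpaceType = Kratos::UblasSpace<double, Kratos::CompressedMatrix, Kratos::Vector>;
using LocalSpaceType = Kratos::UblasSpace<double, Kratos::Matrix, Kratos::Vector>;
using LinearSolverType = Kratos::LinearSolver<SparseSpaceType, LocalSpaceType>;

void RecomputeDistances(Kratos::ModelPart& rFluidModelPart)
{
    // Direct solver chosen only for the sketch; any LinearSolver works
    LinearSolverType::Pointer p_solver =
        Kratos::make_shared<Kratos::SkylineLUFactorizationSolver<SparseSpaceType, LocalSpaceType>>();

    // 2D redistancing with up to 2 nonlinear iterations
    Kratos::VariationalDistanceCalculationProcess<2, SparseSpaceType, LocalSpaceType, LinearSolverType>
        distance_process(rFluidModelPart, p_solver, 2);
    distance_process.Execute();
}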
mandelbrot.c
/* To compile:
   gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp

   Or just type:
   module load gcc
   make

   To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
   ./mandelbrot 4096 4096 1
*/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"

// Q2a: add include for OpenMP header file here:
#include <omp.h>

#define MXITER 1000

typedef struct {
  double r;
  double i;
} complex_t;

// return iterations before z leaves mandelbrot set for given c
int testpoint(complex_t c){
  int iter;
  complex_t z;
  double temp;

  z = c;

  for(iter=0; iter<MXITER; iter++){
    temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;
    if((z.r*z.r+z.i*z.i)>4.0){
      return iter;
    }
  }
  return iter;
}

// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int n,m;
  complex_t c;

  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  // Q2c: add a compiler directive to split the outer for loop amongst threads here
  // (c and m must be private: each thread needs its own copy to avoid a data race)
#pragma omp parallel for private(m, c)
  for(n=0;n<Nim;++n){
    for(m=0;m<Nre;++m){
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}

int main(int argc, char **argv){
  // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
  // usage: ./mandelbrot 4096 4096 1

  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[3]);

  // Q2b: set the number of OpenMP threads to be Nthreads here:
  omp_set_num_threads(Nthreads);

  // storage for the iteration counts
  float *count = (float*) malloc(Nre*Nim*sizeof(float));

  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam  = 0.151579;

  complex_t cmin;
  complex_t cmax;

  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;

  // Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time
  double start;
  start = omp_get_wtime();

  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, cmax, count);

  // Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time
  double end;
  end = omp_get_wtime();

  // print elapsed time
  printf("elapsed = %g\n", end-start);

  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");
  write_hot_png(fp, Nre, Nim, count, 0, 80);

  exit(0);
  return 0;
}
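/* Rows near the set boundary run the full MXITER escape-time loop while rows
 * that diverge quickly do not, so the static row split above can
 * load-imbalance. A variant worth trying (same loop, only the schedule clause
 * changes; the chunk size 8 is an arbitrary pick, not a tuned value):
 *
 * #pragma omp parallel for private(m, c) schedule(dynamic, 8)
 *   for(n=0;n<Nim;++n){
 *     for(m=0;m<Nre;++m){
 *       c.r = cmin.r + dr*m;
 *       c.i = cmin.i + di*n;
 *       count[m+n*Nre] = testpoint(c);
 *     }
 *   }
 *
 * Dynamic scheduling hands out row batches on demand, evening out the
 * per-row cost differences of the escape-time iteration. */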
GB_memcpy.c
//------------------------------------------------------------------------------
// GB_memcpy: parallel memcpy
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Note that this function uses its own hard-coded chunk size.

#include "GB.h"

#define GB_CHUNK (1024*1024)

void GB_memcpy                  // parallel memcpy
(
    void *dest,                 // destination
    const void *src,            // source
    size_t n,                   // # of bytes to copy
    int nthreads                // # of threads to use
)
{
    if (nthreads <= 1 || n <= GB_CHUNK)
    {

        //----------------------------------------------------------------------
        // memcpy using a single thread
        //----------------------------------------------------------------------

        memcpy (dest, src, n) ;
    }
    else
    {

        //----------------------------------------------------------------------
        // memcpy using multiple threads
        //----------------------------------------------------------------------

        nthreads = GB_IMIN (nthreads, n / GB_CHUNK) ;
        size_t nchunks = 1 + (n / GB_CHUNK) ;
        GB_void *pdest = dest ;
        const GB_void *psrc = src ;

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (size_t k = 0 ; k < nchunks ; k++)
        {
            size_t start = k * GB_CHUNK ;
            if (start < n)
            {
                size_t chunk = GB_IMIN (n - start, GB_CHUNK) ;
                memcpy (pdest + start, psrc + start, chunk) ;
            }
        }
    }
}
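// Minimal usage sketch (buffer size and thread count are arbitrary picks):
// copies of GB_CHUNK bytes or less take the single-threaded path, so
// GB_memcpy behaves as a drop-in replacement for memcpy.
#include <stdlib.h>

void GB_memcpy_example (void)
{
    size_t n = 64 * 1024 * 1024 ;       // 64 MiB
    void *src  = malloc (n) ;
    void *dest = malloc (n) ;
    if (src != NULL && dest != NULL)
    {
        GB_memcpy (dest, src, n, 8) ;   // copy with up to 8 threads
    }
    free (src) ;
    free (dest) ;
}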
rar5_fmt_plug.c
/* RAR 5.0 cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia.
 *
 * http://www.rarlab.com/technote.htm
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the
 * following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * $rar5$<salt_len>$<salt>$<iter_log2>$<iv>$<pswcheck_len>$<pswcheck>
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rar5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rar5);
#else

#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               1 // tuned on core i7
#endif
#endif

#include "arch.h"
#include "johnswap.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "rar5_common.h"
//#define PBKDF2_HMAC_SHA256_ALSO_INCLUDE_CTX
#include "pbkdf2_hmac_sha256.h"

#include "memdbg.h"

#define FORMAT_LABEL            "RAR5"
#define FORMAT_NAME             ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME          "PBKDF2-SHA256 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME          "PBKDF2-SHA256 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        32
#define SALT_SIZE               sizeof(struct custom_salt)
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_ALIGN              sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

static char (*saved_key)[PLAINTEXT_LENGTH + 1];

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc(sizeof(*saved_key),
            self->params.max_keys_per_crypt);
    crypt_out = mem_calloc(sizeof(*crypt_out),
            self->params.max_keys_per_crypt);
}

static void done(void)
{
    MEM_FREE(crypt_out);
    MEM_FREE(saved_key);
}

static void set_salt(void *salt)
{
    cur_salt = (struct custom_salt *)salt;
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
    const int count = *pcount;
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
    {
#ifdef SSE_GROUP_SZ_SHA256
        int lens[SSE_GROUP_SZ_SHA256], i, j;
        unsigned char PswCheck[SIZE_PSWCHECK],
                      PswCheckValue[SSE_GROUP_SZ_SHA256][SHA256_DIGEST_SIZE];
        unsigned char *pin[SSE_GROUP_SZ_SHA256];
        union {
            uint32_t *pout[SSE_GROUP_SZ_SHA256];
            unsigned char *poutc;
        } x;
        for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) {
            lens[i] = strlen(saved_key[index+i]);
            pin[i] = (unsigned char*)saved_key[index+i];
            x.pout[i] = (uint32_t*)PswCheckValue[i];
        }
        pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt,
                SIZE_SALT50, cur_salt->iterations+32, &(x.poutc),
                SHA256_DIGEST_SIZE, 0);
        // special wtf processing
        for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
            memset(PswCheck, 0, sizeof(PswCheck));
            for (i = 0; i < SHA256_DIGEST_SIZE; i++)
                PswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[j][i];
            memcpy((void*)crypt_out[index+j], PswCheck, SIZE_PSWCHECK);
        }
#else
        unsigned char PswCheckValue[SHA256_DIGEST_SIZE];
        unsigned char PswCheck[SIZE_PSWCHECK];
        int i;
        pbkdf2_sha256((unsigned char*)saved_key[index],
                strlen(saved_key[index]), cur_salt->salt, SIZE_SALT50,
                cur_salt->iterations+32, PswCheckValue,
                SHA256_DIGEST_SIZE, 0);
        // special wtf processing
        memset(PswCheck, 0, sizeof(PswCheck));
        for (i = 0; i < SHA256_DIGEST_SIZE; i++)
            PswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[i];
        memcpy((void*)crypt_out[index], PswCheck, SIZE_PSWCHECK);
#endif
    }
    return count;
}

static void rar5_set_key(char *key, int index)
{
    strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
    return saved_key[index];
}

struct fmt_main fmt_rar5 = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        0,
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP,
        {
            "iteration count",
        },
        { FORMAT_TAG },
        tests
    }, {
        init,
        done,
        fmt_default_reset,
        fmt_default_prepare,
        valid,
        fmt_default_split,
        get_binary,
        get_salt,
        {
            iteration_count,
        },
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_salt_hash,
        NULL,
        set_salt,
        rar5_set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};

#endif /* plugin stanza */
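/* Standalone sketch of the "special wtf processing" above: the 32-byte
 * PBKDF2-SHA256 output is XOR-folded into the short password-check value
 * stored in the archive (this assumes SIZE_PSWCHECK is 8, as defined in
 * rar5_common.h). */
static void fold_pswcheck(const unsigned char digest[32], unsigned char check[8])
{
    memset(check, 0, 8);
    for (int i = 0; i < 32; i++)
        check[i % 8] ^= digest[i];
}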
d2q9-bgk.c
/* ** Code to implement a d2q9-bgk lattice boltzmann scheme. ** 'd2' inidates a 2-dimensional grid, and ** 'q9' indicates 9 velocities per grid cell. ** 'bgk' refers to the Bhatnagar-Gross-Krook collision step. ** ** The 'speeds' in each cell are numbered as follows: ** ** 6 2 5 ** \|/ ** 3-0-1 ** /|\ ** 7 4 8 ** ** A 2D grid: ** ** cols ** --- --- --- ** | D | E | F | ** rows --- --- --- ** | A | B | C | ** --- --- --- ** ** 'unwrapped' in row major order to give a 1D array: ** ** --- --- --- --- --- --- ** | A | B | C | D | E | F | ** --- --- --- --- --- --- ** ** Grid indicies are: ** ** ny ** ^ cols(jj) ** | ----- ----- ----- ** | | ... | ... | etc | ** | ----- ----- ----- ** rows(ii) | | 1,0 | 1,1 | 1,2 | ** | ----- ----- ----- ** | | 0,0 | 0,1 | 0,2 | ** | ----- ----- ----- ** ----------------------> nx ** ** Note the names of the input parameter and obstacle files ** are passed on the command line, e.g.: ** ** d2q9-bgk.exe input.params obstacles.dat ** ** Be sure to adjust the grid dimensions in the parameter file ** if you choose a different obstacle file. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #include <omp.h> #define NSPEEDS 9 #define FINALSTATEFILE "final_state.dat" #define AVVELSFILE "av_vels.dat" /* struct to hold the parameter values */ typedef struct { int nx; /* no. of cells in x-direction */ int ny; /* no. of cells in y-direction */ int maxIters; /* no. of iterations */ int tot_cells; int reynolds_dim; /* dimension for Reynolds number */ double density; /* density per link */ double accel; /* density redistribution */ double omega; /* relaxation parameter */ } t_param; /* struct to hold the 'speed' values */ typedef struct { double speeds[NSPEEDS]; } t_speed; /* ** function prototypes */ /* load params, allocate memory, load obstacles & initialise fluid particle densities */ int initialise(const char* paramfile, const char* obstaclefile, t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int** obstacles_ptr, double** av_vels_ptr); /* ** The main calculation methods. ** timestep calls, in order, the functions: ** accelerate_flow(), propagate(), rebound() & collision() */ double timestep(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles); int accelerate_flow(const t_param params, t_speed* cells, int* obstacles); int propagate(const t_param params, t_speed* cells, t_speed* tmp_cells); int rebound(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles); double collision(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles); int write_values(const t_param params, t_speed* cells, int* obstacles, double* av_vels); /* finalise, including freeing up allocated memory */ int finalise(const t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int** obstacles_ptr, double** av_vels_ptr); /* Sum all the densities in the grid. ** The total should remain constant from one timestep to the next. 
*/ double total_density(const t_param params, t_speed* cells); /* compute average velocity */ double av_velocity(const t_param params, t_speed* cells, int* obstacles, int ii); /* calculate Reynolds number */ double calc_reynolds(const t_param params, t_speed* cells, int* obstacles); /* utility functions */ void die(const char* message, const int line, const char* file); void usage(const char* exe); /* ** main program: ** initialise, timestep loop, finalise */ int main(int argc, char* argv[]) { char* paramfile = NULL; /* name of the input parameter file */ char* obstaclefile = NULL; /* name of a the input obstacle file */ t_param params; /* struct to hold parameter values */ t_speed* cells = NULL; /* grid containing fluid densities */ t_speed* tmp_cells = NULL; /* scratch space */ int* obstacles = NULL; /* grid indicating which cells are blocked */ double* av_vels = NULL; /* a record of the av. velocity computed for each timestep */ struct timeval timstr; /* structure to hold elapsed time */ struct rusage ru; /* structure to hold CPU time--system and user */ double tic, toc; /* floating point numbers to calculate elapsed wallclock time */ double usrtim; /* floating point number to record elapsed user CPU time */ double systim; /* floating point number to record elapsed system CPU time */ double tot_u; /* parse the command line */ if (argc != 3) { usage(argv[0]); } else { paramfile = argv[1]; obstaclefile = argv[2]; } /* initialise our data structures and load values from file */ initialise(paramfile, obstaclefile, &params, &cells, &tmp_cells, &obstacles, &av_vels); /* iterate for maxIters timesteps */ gettimeofday(&timstr, NULL); tic = timstr.tv_sec + (timstr.tv_usec / 1000000.0); for (int tt = 0; tt < params.maxIters; tt++) { //prob with tot_cells tot_u = timestep(params, cells, tmp_cells, obstacles); av_vels[tt] = tot_u/(double)params.tot_cells; #ifdef DEBUG printf("==timestep: %d==\n", tt); printf("av velocity: %.12E\n", av_vels[tt]); printf("tot density: %.12E\n", total_density(params, cells)); #endif } gettimeofday(&timstr, NULL); toc = timstr.tv_sec + (timstr.tv_usec / 1000000.0); getrusage(RUSAGE_SELF, &ru); timstr = ru.ru_utime; usrtim = timstr.tv_sec + (timstr.tv_usec / 1000000.0); timstr = ru.ru_stime; systim = timstr.tv_sec + (timstr.tv_usec / 1000000.0); /* write final values and free memory */ printf("==done==\n"); printf("Reynolds number:\t\t%.12E\n", calc_reynolds(params, cells, obstacles)); printf("Elapsed time:\t\t\t%.6lf (s)\n", toc - tic); printf("Elapsed user CPU time:\t\t%.6lf (s)\n", usrtim); printf("Elapsed system CPU time:\t%.6lf (s)\n", systim); write_values(params, cells, obstacles, av_vels); finalise(&params, &cells, &tmp_cells, &obstacles, &av_vels); return EXIT_SUCCESS; } double timestep(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles) { double tot_u; #pragma omp parallel proc_bind(master) num_threads(16) { accelerate_flow(params, cells, obstacles); propagate(params, cells, tmp_cells); rebound(params, cells, tmp_cells, obstacles); } tot_u = collision(params, cells, tmp_cells, obstacles); return tot_u; } int accelerate_flow(const t_param params, t_speed* cells, int* obstacles) { /* compute weighting factors */ double w1 = params.density * params.accel / 9.0; double w2 = params.density * params.accel / 36.0; /* modify the 2nd row of the grid */ int ii = params.ny - 2; #pragma omp for for (int jj = 0; jj < params.nx; jj++) { /* if the cell is not occupied and ** we don't send a negative density */ if (!obstacles[ii * params.nx + jj] && 
(cells[ii * params.nx + jj].speeds[3] - w1) > 0.0 && (cells[ii * params.nx + jj].speeds[6] - w2) > 0.0 && (cells[ii * params.nx + jj].speeds[7] - w2) > 0.0) { /* increase 'east-side' densities */ cells[ii * params.nx + jj].speeds[1] += w1; cells[ii * params.nx + jj].speeds[5] += w2; cells[ii * params.nx + jj].speeds[8] += w2; /* decrease 'west-side' densities */ cells[ii * params.nx + jj].speeds[3] -= w1; cells[ii * params.nx + jj].speeds[6] -= w2; cells[ii * params.nx + jj].speeds[7] -= w2; } } return EXIT_SUCCESS; } int propagate(const t_param params, t_speed* cells, t_speed* tmp_cells) { /* loop over _all_ cells */ #pragma omp for for (int ii = 0; ii < params.ny; ii++) { int y_n,y_s; y_n = (ii + 1) % params.ny; y_s = (ii != 0) ? (ii - 1) : (ii + params.ny - 1); for (int jj = 0; jj < params.nx; jj++) { int x_e,x_w; /* determine indices of axis-direction neighbours ** respecting periodic boundary conditions (wrap around) */ x_e = (jj + 1) % params.nx; x_w = (jj != 0) ? (jj - 1) : (jj + params.nx - 1); /* propagate densities to neighbouring cells, following ** appropriate directions of travel and writing into ** scratch space grid */ tmp_cells[ii * params.nx + jj].speeds[0] = cells[ii * params.nx + jj].speeds[0]; /* central cell, no movement */ tmp_cells[ii * params.nx + x_e].speeds[1] = cells[ii * params.nx + jj].speeds[1]; /* east */ tmp_cells[y_n * params.nx + jj].speeds[2] = cells[ii * params.nx + jj].speeds[2]; /* north */ tmp_cells[ii * params.nx + x_w].speeds[3] = cells[ii * params.nx + jj].speeds[3]; /* west */ tmp_cells[y_s * params.nx + jj].speeds[4] = cells[ii * params.nx + jj].speeds[4]; /* south */ tmp_cells[y_n * params.nx + x_e].speeds[5] = cells[ii * params.nx + jj].speeds[5]; /* north-east */ tmp_cells[y_n * params.nx + x_w].speeds[6] = cells[ii * params.nx + jj].speeds[6]; /* north-west */ tmp_cells[y_s * params.nx + x_w].speeds[7] = cells[ii * params.nx + jj].speeds[7]; /* south-west */ tmp_cells[y_s * params.nx + x_e].speeds[8] = cells[ii * params.nx + jj].speeds[8]; /* south-east */ } } return EXIT_SUCCESS; } int rebound(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles) { /* loop over the cells in the grid */ #pragma omp for nowait for (int ii = 0; ii < params.ny; ii++) { for (int jj = 0; jj < params.nx; jj++) { /* if the cell contains an obstacle */ if (obstacles[ii * params.nx + jj]) { /* called after propagate, so taking values from scratch space ** mirroring, and writing into main grid */ cells[ii * params.nx + jj].speeds[1] = tmp_cells[ii * params.nx + jj].speeds[3]; cells[ii * params.nx + jj].speeds[2] = tmp_cells[ii * params.nx + jj].speeds[4]; cells[ii * params.nx + jj].speeds[3] = tmp_cells[ii * params.nx + jj].speeds[1]; cells[ii * params.nx + jj].speeds[4] = tmp_cells[ii * params.nx + jj].speeds[2]; cells[ii * params.nx + jj].speeds[5] = tmp_cells[ii * params.nx + jj].speeds[7]; cells[ii * params.nx + jj].speeds[6] = tmp_cells[ii * params.nx + jj].speeds[8]; cells[ii * params.nx + jj].speeds[7] = tmp_cells[ii * params.nx + jj].speeds[5]; cells[ii * params.nx + jj].speeds[8] = tmp_cells[ii * params.nx + jj].speeds[6]; } } } return EXIT_SUCCESS; } double collision(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles) { const double c_sq = 1.0 / 3.0; /* square of speed of sound*/ const double inv_c_sq = 3.0 / 1.0; /* inverse of square of speed of sound */ const double w0 = 4.0 / 9.0; /* weighting factor */ const double w1 = 1.0 / 9.0; /* weighting factor */ const double w2 = 1.0 / 36.0; /* weighting factor */ const 
double inv_double_c_sq = 1.0/(2.0 * c_sq); /*double of square of speed*/ const double inv_sq_c_sq = 1.0/(2.0 * c_sq * c_sq); double temp_u = 0.0; /* loop over the cells in the grid ** NB the collision step is called after ** the propagate step and so values of interest ** are in the scratch-space grid */ #pragma omp parallel for reduction(+:temp_u) for (int ii = 0; ii < params.ny; ii++) { for (int jj = 0; jj < params.nx; jj++) { /* don't consider occupied cells */ if (!obstacles[ii * params.nx + jj]) { /* compute local density total */ double local_density = 0.0; for (int kk = 0; kk < NSPEEDS; kk++) { local_density += tmp_cells[ii * params.nx + jj].speeds[kk]; } double inv_local_density = 1.0 / local_density; /* compute x velocity component */ double u_x = (tmp_cells[ii * params.nx + jj].speeds[1] + tmp_cells[ii * params.nx + jj].speeds[5] + tmp_cells[ii * params.nx + jj].speeds[8] - (tmp_cells[ii * params.nx + jj].speeds[3] + tmp_cells[ii * params.nx + jj].speeds[6] + tmp_cells[ii * params.nx + jj].speeds[7])) * inv_local_density; /* compute y velocity component */ double u_y = (tmp_cells[ii * params.nx + jj].speeds[2] + tmp_cells[ii * params.nx + jj].speeds[5] + tmp_cells[ii * params.nx + jj].speeds[6] - (tmp_cells[ii * params.nx + jj].speeds[4] + tmp_cells[ii * params.nx + jj].speeds[7] + tmp_cells[ii * params.nx + jj].speeds[8])) * inv_local_density; /* velocity squared */ double u_sq = u_x * u_x + u_y * u_y; //velocity temp_u += sqrt(u_sq); /* directional velocity components */ double u[NSPEEDS]; u[1] = u_x; /* east */ u[2] = u_y; /* north */ u[3] = - u_x; /* west */ u[4] = - u_y; /* south */ u[5] = u_x + u_y; /* north-east */ u[6] = - u_x + u_y; /* north-west */ u[7] = - u_x - u_y; /* south-west */ u[8] = u_x - u_y; /* south-east */ /* equilibrium densities */ double d_equ; /* zero velocity density: weight w0 */ d_equ = w0 * local_density * (1.0 - u_sq * inv_double_c_sq); cells[ii * params.nx + jj].speeds[0] = tmp_cells[ii * params.nx + jj].speeds[0] + params.omega * (d_equ - tmp_cells[ii * params.nx + jj].speeds[0]); /* axis speeds: weight w1 */ for(int kk = 1; kk < 5; kk++){ d_equ = w1 * local_density * (1.0 + u[kk] * inv_c_sq + (u[kk] * u[kk]) * inv_sq_c_sq - u_sq * inv_double_c_sq); cells[ii * params.nx + jj].speeds[kk] = tmp_cells[ii * params.nx + jj].speeds[kk] + params.omega * (d_equ - tmp_cells[ii * params.nx + jj].speeds[kk]); } /* diagonal speeds: weight w2 */ for(int kk = 5; kk < 9; kk++){ d_equ = w2 * local_density * (1.0 + u[kk] * inv_c_sq + (u[kk] * u[kk]) * inv_sq_c_sq - u_sq * inv_double_c_sq); cells[ii * params.nx + jj].speeds[kk] = tmp_cells[ii * params.nx + jj].speeds[kk] + params.omega * (d_equ - tmp_cells[ii * params.nx + jj].speeds[kk]); } } } } return temp_u; } double av_velocity(const t_param params, t_speed* cells, int* obstacles, int k) { int tot_cells = 0; /* no. 
of cells used in calculation */ double tot_u; /* accumulated magnitudes of velocity for each cell */ /* initialise */ tot_u = 0.0; /* loop over all non-blocked cells */ for (int ii = 0; ii < params.ny; ii++) { for (int jj = 0; jj < params.nx; jj++) { /* ignore occupied cells */ if (!obstacles[ii * params.nx + jj]) { /* local density total */ double local_density = 0.0; for (int kk = 0; kk < NSPEEDS; kk++) { local_density += cells[ii * params.nx + jj].speeds[kk]; } /* x-component of velocity */ double u_x = (cells[ii * params.nx + jj].speeds[1] + cells[ii * params.nx + jj].speeds[5] + cells[ii * params.nx + jj].speeds[8] - (cells[ii * params.nx + jj].speeds[3] + cells[ii * params.nx + jj].speeds[6] + cells[ii * params.nx + jj].speeds[7])) / local_density; /* compute y velocity component */ double u_y = (cells[ii * params.nx + jj].speeds[2] + cells[ii * params.nx + jj].speeds[5] + cells[ii * params.nx + jj].speeds[6] - (cells[ii * params.nx + jj].speeds[4] + cells[ii * params.nx + jj].speeds[7] + cells[ii * params.nx + jj].speeds[8])) / local_density; /* accumulate the norm of x- and y- velocity components */ tot_u += sqrt((u_x * u_x) + (u_y * u_y)); /* increase counter of inspected cells */ ++tot_cells; } } } return tot_u / (double)tot_cells; } int initialise(const char* paramfile, const char* obstaclefile, t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int** obstacles_ptr, double** av_vels_ptr) { char message[1024]; /* message buffer */ FILE* fp; /* file pointer */ int xx, yy; /* generic array indices */ int blocked; /* indicates whether a cell is blocked by an obstacle */ int retval; /* to hold return value for checking */ /* open the parameter file */ fp = fopen(paramfile, "r"); if (fp == NULL) { sprintf(message, "could not open input parameter file: %s", paramfile); die(message, __LINE__, __FILE__); } /* read in the parameter values */ retval = fscanf(fp, "%d\n", &(params->nx)); if (retval != 1) die("could not read param file: nx", __LINE__, __FILE__); retval = fscanf(fp, "%d\n", &(params->ny)); if (retval != 1) die("could not read param file: ny", __LINE__, __FILE__); retval = fscanf(fp, "%d\n", &(params->maxIters)); if (retval != 1) die("could not read param file: maxIters", __LINE__, __FILE__); retval = fscanf(fp, "%d\n", &(params->reynolds_dim)); if (retval != 1) die("could not read param file: reynolds_dim", __LINE__, __FILE__); retval = fscanf(fp, "%lf\n", &(params->density)); if (retval != 1) die("could not read param file: density", __LINE__, __FILE__); retval = fscanf(fp, "%lf\n", &(params->accel)); if (retval != 1) die("could not read param file: accel", __LINE__, __FILE__); retval = fscanf(fp, "%lf\n", &(params->omega)); if (retval != 1) die("could not read param file: omega", __LINE__, __FILE__); /* and close up the file */ fclose(fp); /* ** Allocate memory. ** ** Remember C is pass-by-value, so we need to ** pass pointers into the initialise function. ** ** NB we are allocating a 1D array, so that the ** memory will be contiguous. We still want to ** index this memory as if it were a (row major ** ordered) 2D array, however. We will perform ** some arithmetic using the row and column ** coordinates, inside the square brackets, when ** we want to access elements of this array. ** ** Note also that we are using a structure to ** hold an array of 'speeds'. We will allocate ** a 1D array of these structs. 
*/ /* main grid */ *cells_ptr = (t_speed*)malloc(sizeof(t_speed) * (params->ny * params->nx)); if (*cells_ptr == NULL) die("cannot allocate memory for cells", __LINE__, __FILE__); /* 'helper' grid, used as scratch space */ *tmp_cells_ptr = (t_speed*)malloc(sizeof(t_speed) * (params->ny * params->nx)); if (*tmp_cells_ptr == NULL) die("cannot allocate memory for tmp_cells", __LINE__, __FILE__); /* the map of obstacles */ *obstacles_ptr = malloc(sizeof(int) * (params->ny * params->nx)); if (*obstacles_ptr == NULL) die("cannot allocate column memory for obstacles", __LINE__, __FILE__); /* initialise densities */ double w0 = params->density * 4.0 / 9.0; double w1 = params->density / 9.0; double w2 = params->density / 36.0; for (int ii = 0; ii < params->ny; ii++) { for (int jj = 0; jj < params->nx; jj++) { /* centre */ (*cells_ptr)[ii * params->nx + jj].speeds[0] = w0; /* axis directions */ (*cells_ptr)[ii * params->nx + jj].speeds[1] = w1; (*cells_ptr)[ii * params->nx + jj].speeds[2] = w1; (*cells_ptr)[ii * params->nx + jj].speeds[3] = w1; (*cells_ptr)[ii * params->nx + jj].speeds[4] = w1; /* diagonals */ (*cells_ptr)[ii * params->nx + jj].speeds[5] = w2; (*cells_ptr)[ii * params->nx + jj].speeds[6] = w2; (*cells_ptr)[ii * params->nx + jj].speeds[7] = w2; (*cells_ptr)[ii * params->nx + jj].speeds[8] = w2; } } /* first set all cells in obstacle array to zero */ for (int ii = 0; ii < params->ny; ii++) { for (int jj = 0; jj < params->nx; jj++) { (*obstacles_ptr)[ii * params->nx + jj] = 0; } } /* open the obstacle data file */ fp = fopen(obstaclefile, "r"); if (fp == NULL) { sprintf(message, "could not open input obstacles file: %s", obstaclefile); die(message, __LINE__, __FILE__); } /* read-in the blocked cells list */ while ((retval = fscanf(fp, "%d %d %d\n", &xx, &yy, &blocked)) != EOF) { /* some checks */ if (retval != 3) die("expected 3 values per line in obstacle file", __LINE__, __FILE__); if (xx < 0 || xx > params->nx - 1) die("obstacle x-coord out of range", __LINE__, __FILE__); if (yy < 0 || yy > params->ny - 1) die("obstacle y-coord out of range", __LINE__, __FILE__); if (blocked != 1) die("obstacle blocked value should be 1", __LINE__, __FILE__); /* assign to array */ (*obstacles_ptr)[yy * params->nx + xx] = blocked; /* params->tot_cells--; */ } params->tot_cells = 0; for(int ii = 0; ii < params->nx*params->ny; ii++) if(!(*obstacles_ptr)[ii]) params->tot_cells++; /* and close the file */ fclose(fp); /* ** allocate space to hold a record of the average velocities computed ** at each timestep */ *av_vels_ptr = (double*)malloc(sizeof(double) * params->maxIters); if (*av_vels_ptr == NULL) die("cannot allocate memory for av_vels", __LINE__, __FILE__); return EXIT_SUCCESS; } int finalise(const t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int** obstacles_ptr, double** av_vels_ptr) { /* ** free up allocated memory */ free(*cells_ptr); *cells_ptr = NULL; free(*tmp_cells_ptr); *tmp_cells_ptr = NULL; free(*obstacles_ptr); *obstacles_ptr = NULL; free(*av_vels_ptr); *av_vels_ptr = NULL; return EXIT_SUCCESS; } double calc_reynolds(const t_param params, t_speed* cells, int* obstacles) { const double viscosity = 1.0 / 6.0 * (2.0 / params.omega - 1.0); double av_vel = 0; /* for(int ii = 0; ii < params.ny*params.nx; ii++) */ av_vel = av_velocity(params, cells, obstacles, 0); return av_vel * params.reynolds_dim / viscosity; } double total_density(const t_param params, t_speed* cells) { double total = 0.0; /* accumulator */ for (int ii = 0; ii < params.ny; ii++) { for (int jj = 0; jj < params.nx; jj++) { for (int kk = 0; kk < NSPEEDS; kk++) { total += cells[ii * params.nx + 
jj].speeds[kk]; } } } return total; } int write_values(const t_param params, t_speed* cells, int* obstacles, double* av_vels) { FILE* fp; /* file pointer */ const double c_sq = 1.0 / 3.0; /* sq. of speed of sound */ double local_density; /* per grid cell sum of densities */ double pressure; /* fluid pressure in grid cell */ double u_x; /* x-component of velocity in grid cell */ double u_y; /* y-component of velocity in grid cell */ double u; /* norm--root of summed squares--of u_x and u_y */ fp = fopen(FINALSTATEFILE, "w"); if (fp == NULL) { die("could not open output file", __LINE__, __FILE__); } for (int ii = 0; ii < params.ny; ii++) { for (int jj = 0; jj < params.nx; jj++) { /* an occupied cell */ if (obstacles[ii * params.nx + jj]) { u_x = u_y = u = 0.0; pressure = params.density * c_sq; } /* no obstacle */ else { local_density = 0.0; for (int kk = 0; kk < NSPEEDS; kk++) { local_density += cells[ii * params.nx + jj].speeds[kk]; } /* compute x velocity component */ u_x = (cells[ii * params.nx + jj].speeds[1] + cells[ii * params.nx + jj].speeds[5] + cells[ii * params.nx + jj].speeds[8] - (cells[ii * params.nx + jj].speeds[3] + cells[ii * params.nx + jj].speeds[6] + cells[ii * params.nx + jj].speeds[7])) / local_density; /* compute y velocity component */ u_y = (cells[ii * params.nx + jj].speeds[2] + cells[ii * params.nx + jj].speeds[5] + cells[ii * params.nx + jj].speeds[6] - (cells[ii * params.nx + jj].speeds[4] + cells[ii * params.nx + jj].speeds[7] + cells[ii * params.nx + jj].speeds[8])) / local_density; /* compute norm of velocity */ u = sqrt((u_x * u_x) + (u_y * u_y)); /* compute pressure */ pressure = local_density * c_sq; } /* write to file */ fprintf(fp, "%d %d %.12E %.12E %.12E %.12E %d\n", jj, ii, u_x, u_y, u, pressure, obstacles[ii * params.nx + jj]); } } fclose(fp); fp = fopen(AVVELSFILE, "w"); if (fp == NULL) { die("could not open output file", __LINE__, __FILE__); } for (int ii = 0; ii < params.maxIters; ii++) { fprintf(fp, "%d:\t%.12E\n", ii, av_vels[ii]); } fclose(fp); return EXIT_SUCCESS; } void die(const char* message, const int line, const char* file) { fprintf(stderr, "Error at line %d of file %s:\n", line, file); fprintf(stderr, "%s\n", message); fflush(stderr); exit(EXIT_FAILURE); } void usage(const char* exe) { fprintf(stderr, "Usage: %s <paramfile> <obstaclefile>\n", exe); exit(EXIT_FAILURE); }
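/*
** The collision step above relaxes each distribution toward the d2q9 BGK
** equilibrium, with the per-direction formulas written out by hand. The
** helper below recomputes that same equilibrium for a single cell using the
** generic form w_k * rho * (1 + c_k.u/c_sq + (c_k.u)^2/(2 c_sq^2) - u.u/(2 c_sq)),
** with the lattice weights and direction vectors made explicit. It is a
** minimal illustrative sketch and is not called anywhere in this program.
*/
static void d2q9_equilibrium_sketch(double local_density, double u_x, double u_y,
                                    double d_equ[NSPEEDS])
{
  /* lattice weights and directions: centre, E, N, W, S, NE, NW, SW, SE */
  static const double w[NSPEEDS]  = {4.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0,
                                     1.0 / 9.0, 1.0 / 36.0, 1.0 / 36.0,
                                     1.0 / 36.0, 1.0 / 36.0};
  static const double cx[NSPEEDS] = {0.0, 1.0, 0.0, -1.0, 0.0, 1.0, -1.0, -1.0, 1.0};
  static const double cy[NSPEEDS] = {0.0, 0.0, 1.0, 0.0, -1.0, 1.0, 1.0, -1.0, -1.0};
  const double c_sq = 1.0 / 3.0;
  const double u_sq = u_x * u_x + u_y * u_y;

  for (int kk = 0; kk < NSPEEDS; kk++)
  {
    /* cu is the dot product of lattice direction kk with the velocity */
    double cu = cx[kk] * u_x + cy[kk] * u_y;
    d_equ[kk] = w[kk] * local_density
                * (1.0 + cu / c_sq
                   + (cu * cu) / (2.0 * c_sq * c_sq)
                   - u_sq / (2.0 * c_sq));
  }
}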
manyKernels.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #define KERNEL(num) \ void foo_##num() { \ int i, n, j; \ int index[5]; \ long a[5], b[270][270], sum, neg; \ long t[5]; \ long sum2 = 999999; \ long sum3 = 0; \ long temp; \ n = 5; \ for (i=0; i < n; i++ ) { \ for (j=0; j < n; j++ ) { \ b[i][j] = j + i * 1.0; \ index[j] = j; \ temp = (b[i][j] + 1 * 23.3465236) / ((i+j+1) * 1000); \ sum2 = temp < sum2 ? temp : sum2; \ } \ } \ sum = 0; \ _Pragma("omp target map(tofrom: b, sum, t) map(alloc: a)") \ { \ _Pragma("omp teams reduction(+:sum)") \ { \ _Pragma("omp distribute parallel for reduction(+:sum)") \ for (i=0; i < n; i++) { \ a[index[i]] = i+ 10 * 1; \ t[i] = omp_get_team_num(); \ sum += a[i]; \ } \ } \ } \ for (i = 0; i < n; ++i) { \ sum3 += a[i]; \ printf(" %d - %ld - %ld\n", i, a[i], t[i]); \ } \ printf(" Sum = %ld\n",sum); \ printf(" Sum3 = %ld\n",sum3); \ } KERNEL(1) KERNEL(2) KERNEL(3) KERNEL(4) KERNEL(5) KERNEL(6) KERNEL(7) KERNEL(8) KERNEL(9) KERNEL(10) KERNEL(11) KERNEL(12) KERNEL(13) KERNEL(14) KERNEL(15) KERNEL(16) KERNEL(17) KERNEL(18) KERNEL(19) KERNEL(20) KERNEL(21) KERNEL(22) KERNEL(23) KERNEL(24) KERNEL(25) KERNEL(26) KERNEL(27) KERNEL(28) KERNEL(29) KERNEL(30) KERNEL(31) int main (int argc, char *argv[]) { int i, n, j; int index[5]; long a[5], b[270][270], sum, neg; long t[5]; long sum2 = 999999; long sum3 = 0; long temp; /* Some initializations */ n = 5; for (i=0; i < n; i++ ) { for (j=0; j < n; j++ ) { b[i][j] = j + i * 1.0; index[j] = j; temp = (b[i][j] + 1 * 23.3465236) / ((i+j+1) * 1000); sum2 = temp < sum2 ? temp : sum2; } } foo_1(); foo_2(); sum = 0; #pragma omp target map(tofrom: b, sum, t) map(alloc: a) { #pragma omp teams reduction(+:sum) { #pragma omp distribute parallel for reduction(+:sum) for (i=0; i < n; i++) { a[index[i]] = i+ 10 * 1; t[i] = omp_get_team_num(); /* if (a[i] < sum) sum = a[i]; */ } } } for (i = 0; i < n; ++i) { sum3 += a[i]; printf(" %d - %ld - %ld\n", i, a[i], t[i]); } printf(" Sum = %ld\n",sum); printf(" Sum3 = %ld\n",sum3); return 0; }
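/*
** Note on the _Pragma operator used in the KERNEL macro above: its string
** operand is the directive text *without* the leading "#pragma", so
** _Pragma("omp distribute parallel for ...") is correct, while a string
** beginning "pragma omp ..." would be silently ignored as an unknown
** directive. A minimal sketch of the same pattern follows; it is
** illustrative only and is not used by the kernels above.
*/
#define OMP_PARALLEL_SUM(arr, n, out)                  \
  do {                                                 \
    long _s = 0;                                       \
    _Pragma("omp parallel for reduction(+:_s)")        \
    for (int _i = 0; _i < (n); _i++) {                 \
      _s += (arr)[_i];                                 \
    }                                                  \
    (out) = _s;                                        \
  } while (0)
/* usage: long total; OMP_PARALLEL_SUM(a, 5, total); */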
RansacFitter.h
#ifndef SPECTAVI_RANSACFITTER_H #define SPECTAVI_RANSACFITTER_H #include "Camera.h" #include "DltTriangulator.h" #include "EigenDefinitions.h" #include "FundamentalMatrixFitter.h" #include <random> #include <iostream> #include <unordered_set> #ifdef ENABLE_OPENMP #include <omp.h> #endif namespace spectavi { #define PROGRESS_BAR_LENGTH 100 template <typename MatrixType = RowMatrixXd, typename MatrixTypeI = RowMatrixXi> class RansacFitter { typedef typename MatrixType::Scalar Scalar; typedef Eigen::Map<const MatrixType> MatrixTypeMapConst; private: typedef Camera<MatrixType> Camera_t; MatrixTypeMapConst m_x0; MatrixTypeMapConst m_x1; double m_required_percent_inliers; double m_reprojection_error_allowed; int m_maximum_tries; MatrixType m_best_fit_essential_matrix; Camera_t m_best_fit_camera; double m_best_fit_inlier_percent; bool m_success; bool m_find_best_even_in_failure; MatrixTypeI m_inlier_idx; bool process_fundamental_matrix(const Eigen::Ref<const MatrixType> &F, double singular_value_ratio_allowed, int &inlier_count, Camera_t &best_P, MatrixTypeI &inlier_idx) const { bool success = false; Eigen::JacobiSVD<MatrixType> svd(F, Eigen::ComputeFullV | Eigen::ComputeFullU); auto sv = svd.singularValues(); double ratio = std::abs(sv(0) - sv(1)) / (std::abs(sv(0) + sv(1)) / 2.); if (ratio > singular_value_ratio_allowed) { return success; } MatrixType ee(3, 1); ee << 1., 1., 0; MatrixType E = svd.matrixU() * ee.asDiagonal() * svd.matrixV().transpose(); Camera_t P0; double best_percent_inlier = 0; for (Camera_t P1 : Essential2Cameras<std::vector<Camera_t>, MatrixType>(E)) { DltTriangulator<MatrixType> dlt(P0.get_matrix().data(), P1.get_matrix().data()); int nex = m_x0.rows(); int ninlier = 0; for (int ix = 0; ix < nex; ++ix) { dlt.solve(m_x0.row(ix).data(), m_x1.row(ix).data(), 3); bool is_inlier = dlt.reprojection_error() <= m_reprojection_error_allowed && dlt.is_infront_both_cameras(); if (is_inlier) { ninlier++; } } double percent_inlier = ninlier / (double)(nex); if ((percent_inlier >= m_required_percent_inliers || m_find_best_even_in_failure) && (percent_inlier > best_percent_inlier)) { best_percent_inlier = percent_inlier; inlier_count = ninlier; best_P = P1; success = true; // When successful, find the indices of inliers inlier_idx.resize(ninlier, 1); int iidx = 0; // XXX: Duplicated code, see above for (int ix = 0; ix < nex; ++ix) { dlt.solve(m_x0.row(ix).data(), m_x1.row(ix).data(), 3); bool is_inlier = dlt.reprojection_error() <= m_reprojection_error_allowed && dlt.is_infront_both_cameras(); if (is_inlier) { inlier_idx(iidx++) = ix; } } } } return success; } template <typename OutputContainer> OutputContainer draw_random_indices(int range, int k) const { OutputContainer ret(k); std::vector<int> arange(range, 0); for (int i = 0; i < range; ++i) { arange[i] = i; } std::random_device rd; std::mt19937 g(rd()); std::shuffle(arange.begin(), arange.end(), g); for (int i = 0; i < k; ++i) { ret[i] = arange[i]; } return ret; } // taken from https://stackoverflow.com/a/58198037/177931 std::unordered_set<int> floyd_sample(int N, int k) const { // std::default_random_engine gen; std::random_device rd; std::mt19937 gen(rd()); // for the benchmark I used a faster hash table std::unordered_set<int> elems; // preallocation is good for (int r = N - k; r < N; ++r) { int v = std::uniform_int_distribution<>(1, r)(gen); if (!elems.insert(v).second) elems.insert(r); } return elems; } public: RansacFitter(const Scalar *x0, const Scalar *x1, int npt, double required_percent_inliers, double 
reprojection_error_allowed, int maximum_tries, bool find_best_even_in_failure) : m_x0(x0, npt, 3), m_x1(x1, npt, 3), m_required_percent_inliers(required_percent_inliers), m_reprojection_error_allowed(reprojection_error_allowed), m_maximum_tries(maximum_tries), m_success(false), m_best_fit_inlier_percent(0.), m_find_best_even_in_failure(find_best_even_in_failure) { if (m_x0.rows() != m_x1.rows()) { throw std::invalid_argument( "Supplied incorrect point matches, numbers do not match."); } if (m_x0.rows() < 10) { throw std::invalid_argument( "Supplied less than 10 point matches, unsupported."); } } void fit_essential(double singular_value_ratio_allowed = 3e-2, bool progressbar = false, int nthread = 8 ) { int nex = m_x0.rows(); if (!m_success) { m_best_fit_inlier_percent = 0; } bool _success = m_success; #ifdef ENABLE_OPENMP omp_lock_t success_lock; omp_init_lock(&success_lock); #endif int done = 0; #pragma omp parallel for num_threads(nthread) shared(_success) for (int itry = 0; itry < m_maximum_tries; ++itry) { if (progressbar) { #pragma omp critical { done += 1; std::cout << "\r |"; double percent_done = (double(done)) / m_maximum_tries; for (int i = 0; i < PROGRESS_BAR_LENGTH; ++i) { if (i <= percent_done * PROGRESS_BAR_LENGTH) { std::cout << "-"; } else { std::cout << " "; } } std::cout << "|" << std::flush; } } if (_success) { continue; } FundamentalMatrixFitter<MatrixType> fmat_fitter; for (int ix : floyd_sample(nex, 7)) { MatrixType row0 = m_x0.row(ix).hnormalized().eval(); MatrixType row1 = m_x1.row(ix).hnormalized().eval(); double x = row0(0); double y = row0(1); double xp = row1(0); double yp = row1(1); fmat_fitter.add_putative_match(x, y, xp, yp); } MatrixType F0, F1, F2; MatrixTypeI inlier_idx; Camera_t cam; int nroot = fmat_fitter.solve(F0, F1, F2); int ninlier = 0; if (nroot >= 1 && process_fundamental_matrix(F0, singular_value_ratio_allowed, ninlier, cam, inlier_idx)) { double percent_inlier = (double)ninlier / (double)nex; if ((percent_inlier > m_required_percent_inliers || m_find_best_even_in_failure) && m_best_fit_inlier_percent < percent_inlier) { #ifdef ENABLE_OPENMP omp_set_lock(&success_lock); #endif if (!_success) { _success = percent_inlier > m_required_percent_inliers; m_best_fit_inlier_percent = percent_inlier; m_best_fit_essential_matrix = F0; m_best_fit_camera = cam; m_inlier_idx = inlier_idx; } #ifdef ENABLE_OPENMP omp_unset_lock(&success_lock); #endif } } if (nroot >= 2 && process_fundamental_matrix(F1, singular_value_ratio_allowed, ninlier, cam, inlier_idx)) { double percent_inlier = (double)ninlier / (double)nex; if ((percent_inlier > m_required_percent_inliers || m_find_best_even_in_failure) && m_best_fit_inlier_percent < percent_inlier) { #ifdef ENABLE_OPENMP omp_set_lock(&success_lock); #endif if (!_success) { _success = percent_inlier > m_required_percent_inliers; m_best_fit_inlier_percent = percent_inlier; m_best_fit_essential_matrix = F1; m_best_fit_camera = cam; m_inlier_idx = inlier_idx; } #ifdef ENABLE_OPENMP omp_unset_lock(&success_lock); #endif } } if (nroot >= 3 && process_fundamental_matrix(F2, singular_value_ratio_allowed, ninlier, cam, inlier_idx)) { double percent_inlier = (double)ninlier / (double)nex; if ((percent_inlier > m_required_percent_inliers || m_find_best_even_in_failure) && m_best_fit_inlier_percent < percent_inlier) { #ifdef ENABLE_OPENMP omp_set_lock(&success_lock); #endif if (!_success) { _success = percent_inlier > m_required_percent_inliers; m_best_fit_inlier_percent = percent_inlier; m_best_fit_essential_matrix = F2; 
m_best_fit_camera = cam; m_inlier_idx = inlier_idx; } #ifdef ENABLE_OPENMP omp_unset_lock(&success_lock); #endif } } } if (progressbar) { std::cout << std::endl; } #ifdef ENABLE_OPENMP omp_destroy_lock(&success_lock); #endif m_success = _success; } bool success() const { return m_success; } MatrixType essential() const { return m_best_fit_essential_matrix; } MatrixType camera() const { return m_best_fit_camera.get_matrix(); } MatrixTypeI inlier_idx() const { return m_inlier_idx; } double inlier_percent() const { return m_best_fit_inlier_percent; } }; #undef PROGRESS_BAR_LENGTH } // namespace spectavi #endif // SPECTAVI_RANSACFITTER_H
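// Robert Floyd's sampling, used by floyd_sample() above, draws k distinct
// integers from [0, N) with exactly k random draws: for each r in [N - k, N),
// pick v uniformly from [0, r] and insert v, unless v is already present, in
// which case insert r (which cannot have been picked yet). Below is a minimal
// sketch with a linear membership test, adequate for the k = 7 samples drawn
// per RANSAC iteration. It is illustrative only: the class above uses
// std::mt19937 with uniform_int_distribution, and rand() is assumed here
// purely for brevity.
#include <cstdlib>

inline int floyd_sample_sketch(int N, int k, int *out) {
  int count = 0;
  for (int r = N - k; r < N; ++r) {
    int v = std::rand() % (r + 1); // near-uniform draw from [0, r]
    bool present = false;
    for (int i = 0; i < count; ++i) {
      if (out[i] == v) {
        present = true;
        break;
      }
    }
    out[count++] = present ? r : v;
  }
  return count; // always k distinct indices
}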
test-zrocks.c
/* xZTL: Zone Translation Layer User-space Library * * Copyright 2019 Samsung Electronics * * Written by Ivan L. Picoli <i.picoli@samsung.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <omp.h> #include <stdint.h> #include <stdlib.h> #include <xztl.h> #include <libzrocks.h> #include "CUnit/Basic.h" /* Number of Objects */ #define TEST_N_BUFFERS 2 /* Number of random objects to read */ #define TEST_RANDOM_ID 2 /* Object Size */ #define TEST_BUFFER_SZ (1024 * 1024 * 16) /* 16 MB */ static uint8_t *wbuf[TEST_N_BUFFERS]; static uint8_t *rbuf[TEST_N_BUFFERS]; static const char **devname; static void cunit_zrocks_assert_ptr(char *fn, void *ptr) { CU_ASSERT((uint64_t)ptr != 0); if (!ptr) printf("\n %s: ptr %p\n", fn, ptr); } static void cunit_zrocks_assert_int(char *fn, uint64_t status) { CU_ASSERT(status == 0); if (status) printf("\n %s: %lx\n", fn, status); } static int cunit_zrocks_init(void) { return 0; } static int cunit_zrocks_exit(void) { return 0; } static void test_zrocks_init(void) { int ret; ret = zrocks_init(*devname); cunit_zrocks_assert_int("zrocks_init", ret); } static void test_zrocks_exit(void) { zrocks_exit(); } static void test_zrocks_fill_buffer(uint32_t id) { uint32_t byte; uint8_t value = 0x1; for (byte = 0; byte < TEST_BUFFER_SZ; byte += 16) { value += 0x1; memset(&wbuf[id][byte], value, 16); } } static int test_zrocks_check_buffer(uint32_t id, uint32_t off, uint32_t size) { /*printf (" \nMem check:\n"); for (int i = off; i < off + size; i++) { if (i % 16 == 0 && i) printf("\n %d-%d ", i - (i%16), (i - (i%16)) + 16); printf (" %x/%x", wbuf[id][i], rbuf[id][i]); } printf("\n"); */ return memcmp(wbuf[id], rbuf[id], size); } static void test_zrocks_new(void) { uint32_t ids; uint64_t id; uint32_t size; uint8_t level; int ret[TEST_N_BUFFERS]; ids = TEST_N_BUFFERS; size = TEST_BUFFER_SZ; level = 0; #pragma omp parallel for for (id = 0; id < ids; id++) { /* Allocate DMA memory */ wbuf[id] = xztl_media_dma_alloc(size); cunit_zrocks_assert_ptr("xztl_media_dma_alloc", wbuf[id]); if (!wbuf[id]) continue; test_zrocks_fill_buffer(id); ret[id] = zrocks_new(id + 1, wbuf[id], size, level); cunit_zrocks_assert_int("zrocks_new", ret[id]); } } static void test_zrocks_read(void) { uint32_t ids, offset; uint64_t id; int ret[TEST_N_BUFFERS]; size_t read_sz, size; ids = TEST_N_BUFFERS; read_sz = 1024 * 64; /* 64 KB */ size = TEST_BUFFER_SZ; for (id = 0; id < ids; id++) { /* Allocate DMA memory */ rbuf[id] = xztl_media_dma_alloc(size); cunit_zrocks_assert_ptr("xztl_media_dma_alloc", rbuf[id]); if (!rbuf[id]) continue; memset(rbuf[id], 0x0, size); offset = 0; while (offset < size) { ret[id] = zrocks_read_obj(id + 1, offset, rbuf[id] + offset, read_sz); cunit_zrocks_assert_int("zrocks_read_obj", ret[id]); if (ret[id]) printf("Read error: ID %lu, offset %d, status: %x\n", id + 1, offset, ret[id]); offset += read_sz; } ret[id] = test_zrocks_check_buffer(id, 0, TEST_BUFFER_SZ); cunit_zrocks_assert_int("zrocks_read_obj:check", ret[id]); if (ret[id]) printf("Corruption: ID %lu, corrupted: %d bytes\n", id 
+ 1, ret[id]); xztl_media_dma_free(rbuf[id]); } } static void test_zrocks_random_read(void) { uint64_t id; uint64_t random_off[4] = {63, 24567, 175678, 267192}; size_t random_sz[4] = {532, 53, 2695, 1561}; // uint64_t random_off[1] = {24567}; // size_t random_sz[1] = {53}; int readi, ret; uint8_t *buf, *woff; id = TEST_RANDOM_ID; buf = xztl_media_dma_alloc(1024 * 512); cunit_zrocks_assert_ptr("xztl_media_dma_alloc", buf); if (!buf) return; for (readi = 0; readi < 4; readi++) { memset(buf, 0x0, random_sz[readi]); ret = zrocks_read_obj(id, random_off[readi], buf, random_sz[readi]); cunit_zrocks_assert_int("zrocks_read_obj", ret); woff = &wbuf[id - 1][random_off[readi]]; /* Uncomment for a detailed read check (per-byte print) printf (" \nMem check:\n"); for (int i = 0; i < random_sz[readi] + 4096; i++) { if (i % 16 == 0) printf("\n %lu-%lu ", (i+random_off[readi]) - ((i+random_off[readi]) % 16) + random_off[readi] % 16, ((i+random_off[readi]) - ((i+random_off[readi]) % 16)) + 16 + random_off[readi] % 16); printf (" %x/%x", woff[i], buf[i]); } printf("\n"); */ cunit_zrocks_assert_int("zrocks_read_obj:check", memcmp(woff, buf, random_sz[readi])); } xztl_media_dma_free(buf); for (int i = 0; i < TEST_N_BUFFERS; i++) xztl_media_dma_free(wbuf[i]); } int main(int argc, const char **argv) { int failed; if (argc < 2) { printf("Please provide the device path. e.g. liou:/dev/nvme0n2\n"); return -1; } devname = &argv[1]; printf("Device: %s\n", *devname); CU_pSuite pSuite = NULL; if (CUE_SUCCESS != CU_initialize_registry()) return CU_get_error(); pSuite = CU_add_suite("Suite_zrocks", cunit_zrocks_init, cunit_zrocks_exit); if (pSuite == NULL) { CU_cleanup_registry(); return CU_get_error(); } if ((CU_add_test(pSuite, "Initialize ZRocks", test_zrocks_init) == NULL) || (CU_add_test(pSuite, "ZRocks New", test_zrocks_new) == NULL) || (CU_add_test(pSuite, "ZRocks Read", test_zrocks_read) == NULL) || (CU_add_test(pSuite, "ZRocks Random Read", test_zrocks_random_read) == NULL) || (CU_add_test(pSuite, "Close ZRocks", test_zrocks_exit) == NULL)) { CU_cleanup_registry(); return CU_get_error(); } CU_basic_set_mode(CU_BRM_VERBOSE); CU_basic_run_tests(); failed = CU_get_number_of_tests_failed(); CU_cleanup_registry(); return failed; }
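/* The suite above exercises the object API in a fixed order: zrocks_init(),
 * zrocks_new() to write an object, zrocks_read_obj() to read it back, then
 * zrocks_exit(). The condensed sketch below shows that minimal lifecycle in
 * one place, using only the entry points already called by the tests above;
 * the buffer size and object id are arbitrary and error handling is reduced
 * to early return. Illustrative only: it is not registered with the CUnit
 * suite. */
static int zrocks_lifecycle_sketch(const char *dev)
{
    uint8_t *buf;
    int ret;

    if (zrocks_init(dev))
        return -1;

    buf = xztl_media_dma_alloc(4096);
    if (!buf) {
        zrocks_exit();
        return -1;
    }

    memset(buf, 0xab, 4096);
    ret = zrocks_new(1, buf, 4096, 0);          /* write object id 1, level 0 */
    if (!ret)
        ret = zrocks_read_obj(1, 0, buf, 4096); /* read it back from offset 0 */

    xztl_media_dma_free(buf);
    zrocks_exit();
    return ret;
}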
statistic.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/animate.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EvaluateImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
% % The format of the EvaluateImageChannel method is: % % MagickBooleanType EvaluateImage(Image *image, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImages(Image *images, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImageChannel(Image *image, % const ChannelType channel,const MagickEvaluateOperator op, % const double value,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o op: A channel op. % % o value: A value value. % % o exception: return any errors or warnings in this structure. % */ static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels) { register ssize_t i; assert(pixels != (MagickPixelPacket **) NULL); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) if (pixels[i] != (MagickPixelPacket *) NULL) pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static MagickPixelPacket **AcquirePixelThreadSet(const Image *image, const size_t number_images) { register ssize_t i, j; MagickPixelPacket **pixels; size_t length, number_threads; number_threads=GetOpenMPMaximumThreads(); pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (MagickPixelPacket **) NULL) return((MagickPixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { length=image->columns; if (length < number_images) length=number_images; pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(length, sizeof(**pixels)); if (pixels[i] == (MagickPixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); for (j=0; j < (ssize_t) length; j++) GetMagickPixelPacket(image,&pixels[i][j]); } return(pixels); } static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const MagickPixelPacket *color_1, *color_2; int intensity; color_1=(const MagickPixelPacket *) x; color_2=(const MagickPixelPacket *) y; intensity=(int) MagickPixelIntensity(color_2)- (int) MagickPixelIntensity(color_1); return(intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info, Quantum pixel,const MagickEvaluateOperator op,const MagickRealType value) { MagickRealType result; result=0.0; switch (op) { case UndefinedEvaluateOperator: break; case AbsEvaluateOperator: { result=(MagickRealType) fabs((double) (pixel+value)); break; } case AddEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case AddModulusEvaluateOperator: { /* This returns a 'floored modulus' of the addition which is a positive result. It differs from % or fmod() which returns a 'truncated modulus' result, where floor() is replaced by trunc() and could return a negative result (which is clipped). 
*/ result=pixel+value; result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0)); break; } case AndEvaluateOperator: { result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5)); break; } case CosineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case DivideEvaluateOperator: { result=pixel/(value == 0.0 ? 1.0 : value); break; } case ExponentialEvaluateOperator: { result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale* pixel))); break; } case GaussianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, GaussianNoise,value); break; } case ImpulseNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, ImpulseNoise,value); break; } case LaplacianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, LaplacianNoise,value); break; } case LeftShiftEvaluateOperator: { result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5)); break; } case LogEvaluateOperator: { result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value* pixel+1.0))/log((double) (value+1.0))); break; } case MaxEvaluateOperator: { result=(MagickRealType) MagickMax((double) pixel,value); break; } case MeanEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MedianEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MinEvaluateOperator: { result=(MagickRealType) MagickMin((double) pixel,value); break; } case MultiplicativeNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, MultiplicativeGaussianNoise,value); break; } case MultiplyEvaluateOperator: { result=(MagickRealType) (value*pixel); break; } case OrEvaluateOperator: { result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5)); break; } case PoissonNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, PoissonNoise,value); break; } case PowEvaluateOperator: { result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel), (double) value)); break; } case RightShiftEvaluateOperator: { result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5)); break; } case SetEvaluateOperator: { result=value; break; } case SineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case SubtractEvaluateOperator: { result=(MagickRealType) (pixel-value); break; } case ThresholdEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : QuantumRange); break; } case ThresholdBlackEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel); break; } case ThresholdWhiteEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel > value) ? 
QuantumRange : pixel); break; } case UniformNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, UniformNoise,value); break; } case XorEvaluateOperator: { result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5)); break; } } return(result); } MagickExport MagickBooleanType EvaluateImage(Image *image, const MagickEvaluateOperator op,const double value,ExceptionInfo *exception) { MagickBooleanType status; status=EvaluateImageChannel(image,CompositeChannels,op,value,exception); return(status); } MagickExport Image *EvaluateImages(const Image *images, const MagickEvaluateOperator op,ExceptionInfo *exception) { #define EvaluateImageTag "Evaluate/Image" CacheView *evaluate_view; const Image *next; Image *evaluate_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket **restrict evaluate_pixels, zero; RandomInfo **restrict random_info; size_t number_images; ssize_t y; /* Ensure the images are the same size. */ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next=images; next != (Image *) NULL; next=GetNextImageInList(next)) if ((next->columns != images->columns) || (next->rows != images->rows)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ImageWidthsOrHeightsDiffer","`%s'",images->filename); return((Image *) NULL); } /* Initialize evaluate next attributes. */ evaluate_image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (evaluate_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(evaluate_image,DirectClass) == MagickFalse) { InheritException(exception,&evaluate_image->exception); evaluate_image=DestroyImage(evaluate_image); return((Image *) NULL); } number_images=GetImageListLength(images); evaluate_pixels=AcquirePixelThreadSet(images,number_images); if (evaluate_pixels == (MagickPixelPacket **) NULL) { evaluate_image=DestroyImage(evaluate_image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Evaluate image pixels. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(images,&zero); random_info=AcquireRandomInfoThreadSet(); evaluate_view=AcquireCacheView(evaluate_image); if (op == MedianEvaluateOperator) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) #endif for (y=0; y < (ssize_t) evaluate_image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *restrict evaluate_indexes; register MagickPixelPacket *evaluate_pixel; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,evaluate_image->columns, 1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) evaluate_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) number_images; i++) evaluate_pixel[i]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id], GetRedPixelComponent(p),op,evaluate_pixel[i].red); evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id], GetGreenPixelComponent(p),op,evaluate_pixel[i].green); evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id], GetBluePixelComponent(p),op,evaluate_pixel[i].blue); evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id], GetOpacityPixelComponent(p),op,evaluate_pixel[i].opacity); if (evaluate_image->colorspace == CMYKColorspace) evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id], *indexes,op,evaluate_pixel[i].index); image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel), IntensityCompare); SetRedPixelComponent(q,ClampToQuantum(evaluate_pixel[i/2].red)); SetGreenPixelComponent(q,ClampToQuantum(evaluate_pixel[i/2].green)); SetBluePixelComponent(q,ClampToQuantum(evaluate_pixel[i/2].blue)); if (evaluate_image->matte == MagickFalse) SetOpacityPixelComponent(q,ClampToQuantum( evaluate_pixel[i/2].opacity)); else SetAlphaPixelComponent(q,ClampToQuantum(evaluate_pixel[i/2].opacity)); if (evaluate_image->colorspace == CMYKColorspace) SetIndexPixelComponent(evaluate_indexes+x,ClampToQuantum( evaluate_pixel[i/2].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, evaluate_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } else #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) #endif for (y=0; y < (ssize_t) evaluate_image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *restrict evaluate_indexes; register ssize_t i, x; register MagickPixelPacket *evaluate_pixel; register PixelPacket *restrict q; if (status == MagickFalse) continue; 
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,evaluate_image->columns, 1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) evaluate_image->columns; x++) evaluate_pixel[x]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) next->columns; x++) { evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id], GetRedPixelComponent(p),i == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].red); evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id], GetGreenPixelComponent(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].green); evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id], GetBluePixelComponent(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].blue); evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id], GetOpacityPixelComponent(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].opacity); if (evaluate_image->colorspace == CMYKColorspace) evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id], GetIndexPixelComponent(indexes+x),i == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].index); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (op == MeanEvaluateOperator) for (x=0; x < (ssize_t) evaluate_image->columns; x++) { evaluate_pixel[x].red/=number_images; evaluate_pixel[x].green/=number_images; evaluate_pixel[x].blue/=number_images; evaluate_pixel[x].opacity/=number_images; evaluate_pixel[x].index/=number_images; } for (x=0; x < (ssize_t) evaluate_image->columns; x++) { SetRedPixelComponent(q,ClampToQuantum(evaluate_pixel[x].red)); SetGreenPixelComponent(q,ClampToQuantum(evaluate_pixel[x].green)); SetBluePixelComponent(q,ClampToQuantum(evaluate_pixel[x].blue)); if (evaluate_image->matte == MagickFalse) SetOpacityPixelComponent(q,ClampToQuantum(evaluate_pixel[x].opacity)); else SetAlphaPixelComponent(q,ClampToQuantum(evaluate_pixel[x].opacity)); if (evaluate_image->colorspace == CMYKColorspace) SetIndexPixelComponent(evaluate_indexes+x,ClampToQuantum( evaluate_pixel[x].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, evaluate_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } evaluate_view=DestroyCacheView(evaluate_view); evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) evaluate_image=DestroyImage(evaluate_image); return(evaluate_image); } MagickExport MagickBooleanType EvaluateImageChannel(Image *image, const ChannelType channel,const MagickEvaluateOperator op,const double value, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; RandomInfo **restrict random_info; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if 
(image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetRedPixelComponent(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetRedPixelComponent(q),op,value))); if ((channel & GreenChannel) != 0) SetGreenPixelComponent(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetGreenPixelComponent(q),op,value))); if ((channel & BlueChannel) != 0) SetBluePixelComponent(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetBluePixelComponent(q),op,value))); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetOpacityPixelComponent(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetOpacityPixelComponent(q),op,value))); else SetAlphaPixelComponent(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],(Quantum) GetAlphaPixelComponent(q),op,value))); } if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL)) SetIndexPixelComponent(indexes+x,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetIndexPixelComponent(indexes+x),op,value))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImageChannel) #endif proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F u n c t i o n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FunctionImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. % % The format of the FunctionImageChannel method is: % % MagickBooleanType FunctionImage(Image *image, % const MagickFunction function,const ssize_t number_parameters, % const double *parameters,ExceptionInfo *exception) % MagickBooleanType FunctionImageChannel(Image *image, % const ChannelType channel,const MagickFunction function, % const ssize_t number_parameters,const double *argument, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o function: A channel function. 
% % o parameters: one or more parameters. % % o exception: return any errors or warnings in this structure. % */ static Quantum ApplyFunction(Quantum pixel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { MagickRealType result; register ssize_t i; (void) exception; result=0.0; switch (function) { case PolynomialFunction: { /* * Polynomial * Parameters: polynomial constants, highest to lowest order * For example: c0*x^3 + c1*x^2 + c2*x + c3 */ result=0.0; for (i=0; i < (ssize_t) number_parameters; i++) result = result*QuantumScale*pixel + parameters[i]; result *= QuantumRange; break; } case SinusoidFunction: { /* Sinusoid Function * Parameters: Freq, Phase, Ampl, bias */ double freq,phase,ampl,bias; freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0; phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0; ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5; bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5; result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI* (freq*QuantumScale*pixel + phase/360.0) )) + bias ) ); break; } case ArcsinFunction: { /* Arcsin Function (pegged at range limits for invalid results) * Parameters: Width, Center, Range, Bias */ double width,range,center,bias; width = ( number_parameters >= 1 ) ? parameters[0] : 1.0; center = ( number_parameters >= 2 ) ? parameters[1] : 0.5; range = ( number_parameters >= 3 ) ? parameters[2] : 1.0; bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5; result = 2.0/width*(QuantumScale*pixel - center); if ( result <= -1.0 ) result = bias - range/2.0; else if ( result >= 1.0 ) result = bias + range/2.0; else result=(MagickRealType) (range/MagickPI*asin((double) result)+bias); result *= QuantumRange; break; } case ArctanFunction: { /* Arctan Function * Parameters: Slope, Center, Range, Bias */ double slope,range,center,bias; slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0; center = ( number_parameters >= 2 ) ? parameters[1] : 0.5; range = ( number_parameters >= 3 ) ? parameters[2] : 1.0; bias = ( number_parameters >= 4 ) ? 
parameters[3] : 0.5; result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center)); result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double) result) + bias ) ); break; } case UndefinedFunction: break; } return(ClampToQuantum(result)); } MagickExport MagickBooleanType FunctionImage(Image *image, const MagickFunction function,const size_t number_parameters, const double *parameters,ExceptionInfo *exception) { MagickBooleanType status; status=FunctionImageChannel(image,CompositeChannels,function,number_parameters, parameters,exception); return(status); } MagickExport MagickBooleanType FunctionImageChannel(Image *image, const ChannelType channel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { #define FunctionImageTag "Function/Image " CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetRedPixelComponent(q,ApplyFunction(GetRedPixelComponent(q), function,number_parameters,parameters,exception)); if ((channel & GreenChannel) != 0) SetGreenPixelComponent(q,ApplyFunction(GetGreenPixelComponent(q), function,number_parameters,parameters,exception)); if ((channel & BlueChannel) != 0) SetBluePixelComponent(q,ApplyFunction(GetBluePixelComponent(q), function,number_parameters,parameters,exception)); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetOpacityPixelComponent(q,ApplyFunction( GetOpacityPixelComponent(q),function,number_parameters,parameters, exception)); else SetAlphaPixelComponent(q,ApplyFunction((Quantum) GetAlphaPixelComponent(q),function,number_parameters,parameters, exception)); } if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL)) SetIndexPixelComponent(indexes+x,ApplyFunction(GetIndexPixelComponent( indexes+x),function,number_parameters,parameters,exception)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FunctionImageChannel) #endif proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e C h a n n e l E x t r e m a % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelExtrema() returns the extrema of one or more image channels. % % The format of the GetImageChannelExtrema method is: % % MagickBooleanType GetImageChannelExtrema(const Image *image, % const ChannelType channel,size_t *minima,size_t *maxima, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageExtrema(const Image *image, size_t *minima,size_t *maxima,ExceptionInfo *exception) { return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,exception)); } MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image, const ChannelType channel,size_t *minima,size_t *maxima, ExceptionInfo *exception) { double max, min; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageChannelRange(image,channel,&min,&max,exception); *minima=(size_t) ceil(min-0.5); *maxima=(size_t) floor(max+0.5); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l M e a n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelMean() returns the mean and standard deviation of one or more % image channels. % % The format of the GetImageChannelMean method is: % % MagickBooleanType GetImageChannelMean(const Image *image, % const ChannelType channel,double *mean,double *standard_deviation, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o mean: the average value in the channel. % % o standard_deviation: the standard deviation of the channel. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean, double *standard_deviation,ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelMean(image,CompositeChannels,mean,standard_deviation, exception); return(status); } MagickExport MagickBooleanType GetImageChannelMean(const Image *image, const ChannelType channel,double *mean,double *standard_deviation, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; size_t channels; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageChannelStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); channels=0; channel_statistics[CompositeChannels].mean=0.0; channel_statistics[CompositeChannels].standard_deviation=0.0; if ((channel & RedChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[RedChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[RedChannel].variance- channel_statistics[RedChannel].mean* channel_statistics[RedChannel].mean; channels++; } if ((channel & GreenChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[GreenChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[GreenChannel].variance- channel_statistics[GreenChannel].mean* channel_statistics[GreenChannel].mean; channels++; } if ((channel & BlueChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[BlueChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[BlueChannel].variance- channel_statistics[BlueChannel].mean* channel_statistics[BlueChannel].mean; channels++; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { channel_statistics[CompositeChannels].mean+= channel_statistics[OpacityChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[OpacityChannel].variance- channel_statistics[OpacityChannel].mean* channel_statistics[OpacityChannel].mean; channels++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { channel_statistics[CompositeChannels].mean+= channel_statistics[BlackChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[BlackChannel].variance- channel_statistics[BlackChannel].mean* channel_statistics[BlackChannel].mean; channels++; } channel_statistics[CompositeChannels].mean/=channels; channel_statistics[CompositeChannels].standard_deviation= sqrt(channel_statistics[CompositeChannels].standard_deviation/channels); *mean=channel_statistics[CompositeChannels].mean; *standard_deviation=channel_statistics[CompositeChannels].standard_deviation; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l K u r t o s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelKurtosis() returns the kurtosis and skewness of one or more % image channels. 
% % The format of the GetImageChannelKurtosis method is: % % MagickBooleanType GetImageChannelKurtosis(const Image *image, % const ChannelType channel,double *kurtosis,double *skewness, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o kurtosis: the kurtosis of the channel. % % o skewness: the skewness of the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageKurtosis(const Image *image, double *kurtosis,double *skewness,ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness, exception); return(status); } MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image, const ChannelType channel,double *kurtosis,double *skewness, ExceptionInfo *exception) { double area, mean, standard_deviation, sum_squares, sum_cubes, sum_fourth_power; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *kurtosis=0.0; *skewness=0.0; area=0.0; mean=0.0; standard_deviation=0.0; sum_squares=0.0; sum_cubes=0.0; sum_fourth_power=0.0; for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { mean+=GetRedPixelComponent(p); sum_squares+=(double) GetRedPixelComponent(p)*GetRedPixelComponent(p); sum_cubes+=(double) GetRedPixelComponent(p)*GetRedPixelComponent(p)* GetRedPixelComponent(p); sum_fourth_power+=(double) GetRedPixelComponent(p)* GetRedPixelComponent(p)*GetRedPixelComponent(p)* GetRedPixelComponent(p); area++; } if ((channel & GreenChannel) != 0) { mean+=GetGreenPixelComponent(p); sum_squares+=(double) GetGreenPixelComponent(p)* GetGreenPixelComponent(p); sum_cubes+=(double) GetGreenPixelComponent(p)* GetGreenPixelComponent(p)*GetGreenPixelComponent(p); sum_fourth_power+=(double) GetGreenPixelComponent(p)* GetGreenPixelComponent(p)*GetGreenPixelComponent(p)* GetGreenPixelComponent(p); area++; } if ((channel & BlueChannel) != 0) { mean+=GetBluePixelComponent(p); sum_squares+=(double) GetBluePixelComponent(p)* GetBluePixelComponent(p); sum_cubes+=(double) GetBluePixelComponent(p)*GetBluePixelComponent(p)* GetBluePixelComponent(p); sum_fourth_power+=(double) GetBluePixelComponent(p)* GetBluePixelComponent(p)*GetBluePixelComponent(p)* GetBluePixelComponent(p); area++; } if ((channel & OpacityChannel) != 0) { mean+=GetOpacityPixelComponent(p); sum_squares+=(double) GetOpacityPixelComponent(p)* GetOpacityPixelComponent(p); sum_cubes+=(double) GetOpacityPixelComponent(p)* GetOpacityPixelComponent(p)*GetOpacityPixelComponent(p); sum_fourth_power+=(double) GetOpacityPixelComponent(p)* GetOpacityPixelComponent(p)*GetOpacityPixelComponent(p)* GetOpacityPixelComponent(p); area++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { mean+=GetIndexPixelComponent(indexes+x); sum_squares+=(double) GetIndexPixelComponent(indexes+x)* GetIndexPixelComponent(indexes+x); sum_cubes+=(double) GetIndexPixelComponent(indexes+x)* GetIndexPixelComponent(indexes+x)*GetIndexPixelComponent(indexes+x); sum_fourth_power+=(double) 
GetIndexPixelComponent(indexes+x)* GetIndexPixelComponent(indexes+x)*GetIndexPixelComponent(indexes+x)* GetIndexPixelComponent(indexes+x); area++; } p++; } } if (y < (ssize_t) image->rows) return(MagickFalse); if (area != 0.0) { mean/=area; sum_squares/=area; sum_cubes/=area; sum_fourth_power/=area; } standard_deviation=sqrt(sum_squares-(mean*mean)); if (standard_deviation != 0.0) { *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares- 3.0*mean*mean*mean*mean; *kurtosis/=standard_deviation*standard_deviation*standard_deviation* standard_deviation; *kurtosis-=3.0; *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean; *skewness/=standard_deviation*standard_deviation*standard_deviation; } return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l R a n g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelRange() returns the range of one or more image channels. % % The format of the GetImageChannelRange method is: % % MagickBooleanType GetImageChannelRange(const Image *image, % const ChannelType channel,double *minima,double *maxima, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageRange(const Image *image, double *minima,double *maxima,ExceptionInfo *exception) { return(GetImageChannelRange(image,CompositeChannels,minima,maxima,exception)); } MagickExport MagickBooleanType GetImageChannelRange(const Image *image, const ChannelType channel,double *minima,double *maxima, ExceptionInfo *exception) { MagickPixelPacket pixel; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *maxima=(-1.0E-37); *minima=1.0E+37; GetMagickPixelPacket(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((channel & RedChannel) != 0) { if (pixel.red < *minima) *minima=(double) pixel.red; if (pixel.red > *maxima) *maxima=(double) pixel.red; } if ((channel & GreenChannel) != 0) { if (pixel.green < *minima) *minima=(double) pixel.green; if (pixel.green > *maxima) *maxima=(double) pixel.green; } if ((channel & BlueChannel) != 0) { if (pixel.blue < *minima) *minima=(double) pixel.blue; if (pixel.blue > *maxima) *maxima=(double) pixel.blue; } if ((channel & OpacityChannel) != 0) { if (pixel.opacity < *minima) *minima=(double) pixel.opacity; if (pixel.opacity > *maxima) *maxima=(double) pixel.opacity; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if ((double) GetIndexPixelComponent(indexes+x) < *minima) *minima=(double) GetIndexPixelComponent(indexes+x); if ((double) GetIndexPixelComponent(indexes+x) > *maxima) *maxima=(double) GetIndexPixelComponent(indexes+x); } p++; } } return(y == (ssize_t) image->rows ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelStatistics() returns statistics for each channel in the % image. The statistics include the channel depth, its minima, maxima, mean, % standard deviation, kurtosis and skewness. You can access the red channel % mean, for example, like this: % % channel_statistics=GetImageChannelStatistics(image,exception); % red_mean=channel_statistics[RedChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageChannelStatistics method is: % % ChannelStatistics *GetImageChannelStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double area; MagickStatusType status; QuantumAny range; register ssize_t i; size_t channels, depth, length; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); length=CompositeChannels+1UL; channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length, sizeof(*channel_statistics)); if (channel_statistics == (ChannelStatistics *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_statistics,0,length* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) CompositeChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-1.0E-37); channel_statistics[i].minima=1.0E+37; } for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; ) { if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[RedChannel].depth; range=GetQuantumRange(depth); status=GetRedPixelComponent(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetRedPixelComponent(p),range),range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[RedChannel].depth++; continue; } } if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[GreenChannel].depth; range=GetQuantumRange(depth); status=GetGreenPixelComponent(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetGreenPixelComponent(p),range),range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[GreenChannel].depth++; continue; } } if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[BlueChannel].depth; range=GetQuantumRange(depth); status=GetBluePixelComponent(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetBluePixelComponent(p),range),range) ? 
MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[BlueChannel].depth++; continue; } } if (image->matte != MagickFalse) { if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[OpacityChannel].depth; range=GetQuantumRange(depth); status=GetOpacityPixelComponent(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetOpacityPixelComponent(p),range),range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[OpacityChannel].depth++; continue; } } } if (image->colorspace == CMYKColorspace) { if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[BlackChannel].depth; range=GetQuantumRange(depth); status=GetIndexPixelComponent(indexes+x) != ScaleAnyToQuantum(ScaleQuantumToAny(GetIndexPixelComponent( indexes+x),range),range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[BlackChannel].depth++; continue; } } } if ((double) GetRedPixelComponent(p) < channel_statistics[RedChannel].minima) channel_statistics[RedChannel].minima=(double) GetRedPixelComponent(p); if ((double) GetRedPixelComponent(p) > channel_statistics[RedChannel].maxima) channel_statistics[RedChannel].maxima=(double) GetRedPixelComponent(p); channel_statistics[RedChannel].sum+=GetRedPixelComponent(p); channel_statistics[RedChannel].sum_squared+=(double) GetRedPixelComponent(p)* GetRedPixelComponent(p); channel_statistics[RedChannel].sum_cubed+=(double) GetRedPixelComponent(p)*GetRedPixelComponent(p)* GetRedPixelComponent(p); channel_statistics[RedChannel].sum_fourth_power+=(double) GetRedPixelComponent(p)*GetRedPixelComponent(p)* GetRedPixelComponent(p)*GetRedPixelComponent(p); if ((double) GetGreenPixelComponent(p) < channel_statistics[GreenChannel].minima) channel_statistics[GreenChannel].minima=(double) GetGreenPixelComponent(p); if ((double) GetGreenPixelComponent(p) > channel_statistics[GreenChannel].maxima) channel_statistics[GreenChannel].maxima=(double) GetGreenPixelComponent(p); channel_statistics[GreenChannel].sum+=GetGreenPixelComponent(p); channel_statistics[GreenChannel].sum_squared+=(double) GetGreenPixelComponent(p)*GetGreenPixelComponent(p); channel_statistics[GreenChannel].sum_cubed+=(double) GetGreenPixelComponent(p)*GetGreenPixelComponent(p)* GetGreenPixelComponent(p); channel_statistics[GreenChannel].sum_fourth_power+=(double) GetGreenPixelComponent(p)*GetGreenPixelComponent(p)* GetGreenPixelComponent(p)*GetGreenPixelComponent(p); if ((double) GetBluePixelComponent(p) < channel_statistics[BlueChannel].minima) channel_statistics[BlueChannel].minima=(double) GetBluePixelComponent(p); if ((double) GetBluePixelComponent(p) > channel_statistics[BlueChannel].maxima) channel_statistics[BlueChannel].maxima=(double) GetBluePixelComponent(p); channel_statistics[BlueChannel].sum+=GetBluePixelComponent(p); channel_statistics[BlueChannel].sum_squared+=(double) GetBluePixelComponent(p)*GetBluePixelComponent(p); channel_statistics[BlueChannel].sum_cubed+=(double) GetBluePixelComponent(p)*GetBluePixelComponent(p)* GetBluePixelComponent(p); channel_statistics[BlueChannel].sum_fourth_power+=(double) GetBluePixelComponent(p)*GetBluePixelComponent(p)* GetBluePixelComponent(p)*GetBluePixelComponent(p); if (image->matte != MagickFalse) { if ((double) GetOpacityPixelComponent(p) < channel_statistics[OpacityChannel].minima) channel_statistics[OpacityChannel].minima=(double) GetOpacityPixelComponent(p); if ((double) GetOpacityPixelComponent(p) > 
channel_statistics[OpacityChannel].maxima) channel_statistics[OpacityChannel].maxima=(double) GetOpacityPixelComponent(p); channel_statistics[OpacityChannel].sum+=GetOpacityPixelComponent(p); channel_statistics[OpacityChannel].sum_squared+=(double) GetOpacityPixelComponent(p)*GetOpacityPixelComponent(p); channel_statistics[OpacityChannel].sum_cubed+=(double) GetOpacityPixelComponent(p)*GetOpacityPixelComponent(p)* GetOpacityPixelComponent(p); channel_statistics[OpacityChannel].sum_fourth_power+=(double) GetOpacityPixelComponent(p)*GetOpacityPixelComponent(p)* GetOpacityPixelComponent(p)*GetOpacityPixelComponent(p); } if (image->colorspace == CMYKColorspace) { if ((double) GetIndexPixelComponent(indexes+x) < channel_statistics[BlackChannel].minima) channel_statistics[BlackChannel].minima=(double) GetIndexPixelComponent(indexes+x); if ((double) GetIndexPixelComponent(indexes+x) > channel_statistics[BlackChannel].maxima) channel_statistics[BlackChannel].maxima=(double) GetIndexPixelComponent(indexes+x); channel_statistics[BlackChannel].sum+= GetIndexPixelComponent(indexes+x); channel_statistics[BlackChannel].sum_squared+=(double) GetIndexPixelComponent(indexes+x)*GetIndexPixelComponent(indexes+x); channel_statistics[BlackChannel].sum_cubed+=(double) GetIndexPixelComponent(indexes+x)*GetIndexPixelComponent(indexes+x)* GetIndexPixelComponent(indexes+x); channel_statistics[BlackChannel].sum_fourth_power+=(double) GetIndexPixelComponent(indexes+x)*GetIndexPixelComponent(indexes+x)* GetIndexPixelComponent(indexes+x)*GetIndexPixelComponent(indexes+x); } x++; p++; } } area=(double) image->columns*image->rows; for (i=0; i < (ssize_t) CompositeChannels; i++) { channel_statistics[i].sum/=area; channel_statistics[i].sum_squared/=area; channel_statistics[i].sum_cubed/=area; channel_statistics[i].sum_fourth_power/=area; channel_statistics[i].mean=channel_statistics[i].sum; channel_statistics[i].variance=channel_statistics[i].sum_squared; channel_statistics[i].standard_deviation=sqrt( channel_statistics[i].variance-(channel_statistics[i].mean* channel_statistics[i].mean)); } for (i=0; i < (ssize_t) CompositeChannels; i++) { channel_statistics[CompositeChannels].depth=(size_t) MagickMax((double) channel_statistics[CompositeChannels].depth,(double) channel_statistics[i].depth); channel_statistics[CompositeChannels].minima=MagickMin( channel_statistics[CompositeChannels].minima, channel_statistics[i].minima); channel_statistics[CompositeChannels].maxima=MagickMax( channel_statistics[CompositeChannels].maxima, channel_statistics[i].maxima); channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum; channel_statistics[CompositeChannels].sum_squared+= channel_statistics[i].sum_squared; channel_statistics[CompositeChannels].sum_cubed+= channel_statistics[i].sum_cubed; channel_statistics[CompositeChannels].sum_fourth_power+= channel_statistics[i].sum_fourth_power; channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean; channel_statistics[CompositeChannels].variance+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; } channels=3; if (image->matte != MagickFalse) channels++; if (image->colorspace == CMYKColorspace) channels++; channel_statistics[CompositeChannels].sum/=channels; channel_statistics[CompositeChannels].sum_squared/=channels; channel_statistics[CompositeChannels].sum_cubed/=channels; 
channel_statistics[CompositeChannels].sum_fourth_power/=channels; channel_statistics[CompositeChannels].mean/=channels; channel_statistics[CompositeChannels].variance/=channels; channel_statistics[CompositeChannels].standard_deviation= sqrt(channel_statistics[CompositeChannels].standard_deviation/channels); channel_statistics[CompositeChannels].kurtosis/=channels; channel_statistics[CompositeChannels].skewness/=channels; for (i=0; i <= (ssize_t) CompositeChannels; i++) { if (channel_statistics[i].standard_deviation == 0.0) continue; channel_statistics[i].skewness=(channel_statistics[i].sum_cubed- 3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+ 2.0*channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)/(channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power- 4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+ 6.0*channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)/(channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation)-3.0; } return(channel_statistics); }
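The statistics functions above follow the usage pattern documented in the GetImageChannelStatistics() header comment. Below is a minimal caller sketch; PrintRedChannelStatistics is a hypothetical helper, not part of the library, and it assumes an `image` and `exception` acquired elsewhere (e.g. via ReadImage() and AcquireExceptionInfo()) plus the usual MagickCore headers:

static void PrintRedChannelStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return;
  (void) fprintf(stdout,"red: mean=%g stddev=%g min=%g max=%g\n",
    channel_statistics[RedChannel].mean,
    channel_statistics[RedChannel].standard_deviation,
    channel_statistics[RedChannel].minima,
    channel_statistics[RedChannel].maxima);
  /* free the statistics buffer, as the implementation above does */
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
}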
test_openmp.c
#include "config.h" /* getopt needs _POSIX_C_SOURCE 2 */ #define _POSIX_C_SOURCE 2 #include <ctype.h> #include <limits.h> #include <math.h> #include <stddef.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #if defined(_MSC_VER) #include "wingetopt/src/getopt.h" #else #include <unistd.h> #endif #if defined(_OPENMP) #include <omp.h> #endif #if HAVE_SSE2 #include "ssw.h" #endif #include "parasail.h" #include "parasail/io.h" #include "parasail/memory.h" #include "parasail/stats.h" /*#include "timer.h"*/ #include "timer_real.h" #if HAVE_SSE2 static parasail_result_t* ssw_( const char * const restrict s1, const int s1_len, const char * const restrict s2, const int s2_len, const int open, const int gap, const parasail_matrix_t * pmatrix, int score_size) { parasail_result_t *result = parasail_result_new(); s_profile *profile = NULL; int8_t *s1_num = (int8_t*)malloc(sizeof(int8_t) * s1_len); int8_t *s2_num = (int8_t*)malloc(sizeof(int8_t) * s2_len); int8_t *matrix = (int8_t*)malloc(sizeof(int8_t) * 24 * 24); s_align *ssw_result = NULL; int m = 0; /* This table is used to transform amino acid letters into numbers. */ static const int8_t table[128] = { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 20, 4, 3, 6, 13, 7, 8, 9, 23, 11, 10, 12, 2, 23, 14, 5, 1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23, 23, 0, 20, 4, 3, 6, 13, 7, 8, 9, 23, 11, 10, 12, 2, 23, 14, 5, 1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23 }; /* initialize score matrix */ for (m = 0; m < s1_len; ++m) s1_num[m] = table[(int)s1[m]]; for (m = 0; m < s2_len; ++m) s2_num[m] = table[(int)s2[m]]; for (m = 0; m < 24*24; ++m) matrix[m] = pmatrix->matrix[m]; profile = ssw_init(s1_num, s1_len, matrix, 24, score_size); ssw_result = ssw_align(profile, s2_num, s2_len, -open, -gap, 2, 0, 0, s1_len/2); result->score = ssw_result->score1; if (ssw_result->saturated) { result->flag |= PARASAIL_FLAG_SATURATED; } align_destroy(ssw_result); init_destroy(profile); free(s1_num); free(s2_num); free(matrix); return result; } static parasail_result_t* ssw( const char * const restrict s1, const int s1_len, const char * const restrict s2, const int s2_len, const int open, const int gap, const parasail_matrix_t *matrix) { return ssw_(s1, s1_len, s2, s2_len, open, gap, matrix, 2); } static parasail_result_t* ssw_16( const char * const restrict s1, const int s1_len, const char * const restrict s2, const int s2_len, const int open, const int gap, const parasail_matrix_t *matrix) { return ssw_(s1, s1_len, s2, s2_len, open, gap, matrix, 1); } #endif static inline unsigned long binomial_coefficient(unsigned long n, unsigned long k) { /* from http://blog.plover.com/math/choose.html */ unsigned long r = 1; unsigned long d; if (k > n) { return 0; } for (d = 1; d <= k; d++) { r *= n--; r /= d; } return r; } static inline void k_combination2(unsigned long pos, unsigned long *a, unsigned long *b) { double s; double i = floor(sqrt(2.0 * pos)) - 1.0; if (i <= 1.0) { i = 1.0; } s = i * (i - 1.0) / 2.0; while (pos - s >= i) { s += i; i += 1; } *a = (unsigned long)(pos - s); *b = (unsigned long)(i); } int main(int argc, char **argv) { double timer_clock = 0.0; unsigned long i = 0; size_t limit = 0; char *filename_database = NULL; parasail_sequences_t *sequences_database = NULL; size_t 
seq_count_database = 0; char *filename_queries = NULL; parasail_sequences_t *sequences_queries = NULL; size_t seq_count_queries = 0; char *endptr = NULL; char *funcname1 = NULL; char *funcname2 = NULL; parasail_function_t *function1 = NULL; parasail_function_t *function2 = NULL; int banded = 0; int kbandsize = 3; int c = 0; char *matrixname = "blosum62"; const parasail_matrix_t *matrix = NULL; int gap_open = 10; int gap_extend = 1; int N = 1; int saturated = 0; int smallest_first = 0; int biggest_first = 0; int truncate = 0; int iterations = 1; int func_cutoff = 0; int iter = 0; stats_t stats_time; stats_clear(&stats_time); while ((c = getopt(argc, argv, "a:A:c:b:f:k:q:o:e:slt:i:")) != -1) { switch (c) { case 'a': funcname1 = optarg; break; case 'A': funcname2 = optarg; break; case 'b': matrixname = optarg; break; case 'c': errno = 0; func_cutoff = strtol(optarg, &endptr, 10); if (errno) { perror("strtol"); exit(1); } break; case 'f': filename_database = optarg; break; case 'q': filename_queries = optarg; break; case 'k': errno = 0; kbandsize = strtol(optarg, &endptr, 10); if (errno) { perror("strtol"); exit(1); } break; case 'i': errno = 0; iterations = strtol(optarg, &endptr, 10); if (errno) { perror("strtol"); exit(1); } break; case 'o': errno = 0; gap_open = strtol(optarg, &endptr, 10); if (errno) { perror("strtol"); exit(1); } break; case 'e': errno = 0; gap_extend = strtol(optarg, &endptr, 10); if (errno) { perror("strtol"); exit(1); } break; case 's': smallest_first = 1; break; case 'l': biggest_first = 1; break; case 't': errno = 0; truncate = strtol(optarg, &endptr, 10); if (errno) { perror("strtol"); exit(1); } break; case '?': if (optopt == 'a' || optopt == 'b' || optopt == 'e' || optopt == 'f' || optopt == 'i' || optopt == 'n' || optopt == 'o' || optopt == 't') { fprintf(stderr, "Option -%c requires an argument.\n", optopt); } else if (isprint(optopt)) { fprintf(stderr, "Unknown option `-%c'.\n", optopt); } else { fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt); } exit(1); default: fprintf(stderr, "default case in getopt\n"); exit(1); } } if (smallest_first && biggest_first) { fprintf(stderr, "cannot choose both smallest and biggest first\n"); exit(1); } /* select the function */ if (funcname1) { function1 = parasail_lookup_function(funcname1); #if HAVE_SSE2 if (NULL == function1) { if (0 == strcmp(funcname1, "ssw_16")) { function1 = ssw_16; } else if (0 == strcmp(funcname1, "ssw_8")) { function1 = ssw; } } #endif if (NULL == function1 && NULL != strstr(funcname1, "nw_banded")) { banded = 1; } if (NULL == function1 && 0 == banded) { fprintf(stderr, "Specified function1 not found.\n"); exit(1); } } else { fprintf(stderr, "No alignment function1 specified.\n"); exit(1); } if (funcname2) { function2 = parasail_lookup_function(funcname2); if (NULL == function2) { fprintf(stderr, "Specified function2 not found.\n"); exit(1); } } /* select the substitution matrix */ if (matrixname) { matrix = parasail_matrix_lookup(matrixname); if (NULL == matrix) { fprintf(stderr, "Specified substitution matrix not found.\n"); exit(1); } } if (filename_database) { sequences_database = parasail_sequences_from_file(filename_database); seq_count_database = sequences_database->l; } else { fprintf(stderr, "missing database filename\n"); exit(1); } limit = binomial_coefficient(seq_count_database, 2); /*printf("%lu choose 2 is %lu\n", seq_count_database, limit);*/ #if defined(_OPENMP) #pragma omp parallel { #pragma omp single { N = omp_get_max_threads(); 
/*printf("omp_get_max_threads()=%d\n", N);*/ } } #endif if (filename_queries) { double total_timer = 0.0; sequences_queries = parasail_sequences_from_file(filename_queries); seq_count_queries = sequences_queries->l; for (i=0; i<seq_count_queries; ++i) { int saturated_query = 0; double local_timer = 0.0; parasail_function_t *function = function1; if (func_cutoff > 0) { if (sequences_queries->seqs[i].seq.l > (unsigned long)func_cutoff) { function = function2; } } local_timer = timer_real(); #pragma omp parallel { long long int seq_count_database_signed = seq_count_database; long long int j_signed = 0; #pragma omp for schedule(guided) for (j_signed=0; j_signed<seq_count_database_signed; ++j_signed) { parasail_result_t *result = function( sequences_queries->seqs[i].seq.s, sequences_queries->seqs[i].seq.l, sequences_database->seqs[j_signed].seq.s, sequences_database->seqs[j_signed].seq.l, gap_open, gap_extend, matrix); #pragma omp atomic saturated_query += parasail_result_is_saturated(result); parasail_result_free(result); } } local_timer = timer_real() - local_timer; total_timer += local_timer; printf("%lu\t %lu\t %d\t %f\n", i, (unsigned long)sequences_queries->seqs[i].seq.l, saturated_query, local_timer); fflush(stdout); } printf("total_time=%f\n", total_timer); fflush(stdout); } else { for (iter=0; iter<iterations; ++iter) { timer_clock = timer_real(); #pragma omp parallel { unsigned long a=0; unsigned long b=1; unsigned long swap=0; long long i_signed = 0; long long limit_signed = limit; #pragma omp for schedule(guided) for (i_signed=0; i_signed<limit_signed; ++i_signed) { parasail_function_t *function = function1; parasail_result_t *result = NULL; unsigned long query_size; k_combination2(i_signed, &a, &b); if (smallest_first) { if (sequences_database->seqs[a].seq.l > sequences_database->seqs[b].seq.l) { swap = a; a = b; b = swap; } } else if (biggest_first) { if (sequences_database->seqs[a].seq.l < sequences_database->seqs[b].seq.l) { swap = a; a = b; b = swap; } } query_size = sequences_database->seqs[a].seq.l; if (truncate > 0) { if (query_size > (unsigned long)truncate) { query_size = truncate; } } if (func_cutoff > 0) { if (query_size > (unsigned long)func_cutoff) { function = function2; } } if (NULL != function1) { result = function( sequences_database->seqs[a].seq.s, query_size, sequences_database->seqs[b].seq.s, sequences_database->seqs[b].seq.l, gap_open, gap_extend, matrix); } else if (0 != banded) { result = parasail_nw_banded( sequences_database->seqs[a].seq.s, query_size, sequences_database->seqs[b].seq.s, sequences_database->seqs[b].seq.l, gap_open, gap_extend, kbandsize, matrix); } #pragma omp atomic saturated += parasail_result_is_saturated(result); if (parasail_result_is_saturated(result)) { #pragma omp critical(printer) { printf("saturated -- (%lu,%lu)\n", a, b); } } parasail_result_free(result); } } timer_clock = timer_real() - timer_clock; stats_sample_value(&stats_time, timer_clock); } printf("%s\t %s\t %d\t %d\t %d\t %d\t %f\t %f\t %f\t %f\n", funcname1, matrixname, gap_open, gap_extend, N, saturated, stats_time._mean, stats_stddev(&stats_time), stats_time._min, stats_time._max); fflush(stdout); } return 0; }
mode_op.h
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License. */

#pragma once
#include <algorithm>
#include <cmath>
#include <cstring>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/transpose_op.h"

namespace paddle {
namespace operators {

template <typename T, typename Type>
static void getMode(Type input_height, Type input_width, int input_dim,
                    const framework::Tensor* input, T* t_out, Type* t_indices) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(j), j));
      }
    } else {
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j));
      }
    }
    std::sort(col_vec.begin(), col_vec.end(),
              [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
                return (!std::isnan(static_cast<double>(l.first)) &&
                        std::isnan(static_cast<double>(r.first))) ||
                       (l.first < r.first);
              });
    T mode = 0;
    int64_t indice = 0;
    int64_t cur_freq = 0;
    int64_t max_freq = 0;
    for (int64_t i = 0; i < input_width; ++i) {
      ++cur_freq;
      if (i == input_width - 1 || (col_vec[i + 1].first != col_vec[i].first)) {
        if (cur_freq > max_freq) {
          max_freq = cur_freq;
          mode = col_vec[i].first;
          indice = col_vec[i].second;
        }
        cur_freq = 0;
      }
    }
    t_out[i] = mode;
    t_indices[i] = indice;
  }
}

template <typename T, typename Type>
static void ModeAssign(const Type& input_height, const Type& input_width,
                       const int& input_dim, const framework::Tensor* input,
                       const framework::Tensor* indices, T* output_data) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    if (input_dim == 1) {
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      auto e_indices = framework::EigenVector<Type>::Flatten(*indices);
      output_data[i * input_width + e_indices(0)] = e_input(0);
    } else {
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      auto e_indices =
          framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
      output_data[i * input_width + e_indices(i, 0)] = e_input(i, 0);
    }
  }
}

template <typename DeviceContext, typename T>
class ModeCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<framework::Tensor>("X");
    auto* output = context.Output<framework::Tensor>("Out");
    auto* indices = context.Output<framework::Tensor>("Indices");
    const auto& in_dims = input->dims();
    bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));

    // axis < 0, calculate the real axis
    int axis = static_cast<int>(context.Attr<int>("axis"));
    if (axis < 0) axis += in_dims.size();

    T* output_data = output->mutable_data<T>(context.GetPlace());
    int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace());
    auto out_dims = output->dims();
    // if axis is not the last dim, transpose it to the last dim, do the
    // calculation, then transpose it back to the original axis.
    if (axis == in_dims.size() - 1) {
      const int64_t& input_height =
          phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t& input_width = in_dims[in_dims.size() - 1];
      getMode<T, int64_t>(input_height, input_width, in_dims.size(), input,
                          output_data, indices_data);
    } else {
      std::vector<int> trans_axis;
      for (int i = 0; i < axis; i++) {
        trans_axis.emplace_back(i);
      }
      trans_axis.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans_axis.emplace_back(i);
      }
      trans_axis.emplace_back(axis);

      if (!keepdim) {
        std::vector<int> tmp_out_shape;
        for (int i = 0; i < axis; i++) {
          tmp_out_shape.emplace_back(in_dims[i]);
        }
        tmp_out_shape.emplace_back(1);
        for (int i = axis + 1; i < in_dims.size(); i++) {
          tmp_out_shape.emplace_back(in_dims[i]);
        }
        framework::DDim tmp_out_dim = phi::make_ddim(tmp_out_shape);
        output->Resize(tmp_out_dim);
        indices->Resize(tmp_out_dim);
      }

      // get the trans input_dims, out_dims
      framework::DDim trans_shape(in_dims);
      framework::DDim trans_out_shape(in_dims);
      for (size_t i = 0; i < trans_axis.size(); i++) {
        trans_shape[i] = in_dims[trans_axis[i]];
        trans_out_shape[i] = in_dims[trans_axis[i]];
      }
      trans_out_shape[in_dims.size() - 1] = 1;

      framework::Tensor trans_input;
      trans_input.mutable_data<T>(trans_shape, context.GetPlace());
      int ndims = trans_axis.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();

      // transpose the input value
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input,
                                                  &trans_input, trans_axis);

      const int64_t input_height =
          phi::product(phi::slice_ddim(trans_shape, 0, trans_shape.size() - 1));
      const int64_t input_width = trans_shape[trans_shape.size() - 1];
      framework::Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_out_shape, context.GetPlace());
      framework::Tensor tmp_indices;
      auto* t_ind = tmp_indices.mutable_data<int64_t>(trans_out_shape,
                                                      context.GetPlace());
      getMode<T, int64_t>(input_height, input_width, in_dims.size(),
                          &trans_input, t_out, t_ind);
      // transpose back
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_context, tmp_indices, indices, trans_axis);
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  output, trans_axis);
      if (!keepdim) {
        output->Resize(out_dims);
        indices->Resize(out_dims);
      }
    }
  }
};

template <typename DeviceContext, typename T>
class ModeGradCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<framework::Tensor>("X");
    auto* out_grad =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* indices = context.Input<framework::Tensor>("Indices");
    auto* x_grad =
        context.Output<framework::Tensor>(framework::GradVarName("X"));
    int axis = static_cast<int>(context.Attr<int>("axis"));
    bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));

    auto in_dims = x->dims();
    auto out_dims = indices->dims();

    // axis < 0, get the real axis
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;

    if (!keepdim) {
      std::vector<int> tmp_out_shape;
      for (int i = 0; i < axis; i++) {
        tmp_out_shape.emplace_back(out_dims[i]);
      }
      tmp_out_shape.emplace_back(1);
      for (int i = axis + 1; i < in_dims.size(); i++) {
        tmp_out_shape.emplace_back(out_dims[i - 1]);
      }
      out_dims = phi::make_ddim(tmp_out_shape);
    }
    T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
    if (axis == in_dims.size() - 1) {
      // allocate the memory for the input_grad
      // assign the out_grad to input_grad directly
      const int64_t input_height =
          phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];

      // init the output grad with 0, because some input elements have no grad
      memset(x_grad_data, 0, x_grad->numel() * sizeof(T));
      // Assign the output_grad to input_grad
      if (keepdim) {
        ModeAssign(input_height, input_width, in_dims.size(), out_grad,
                   indices, x_grad_data);
      } else {
        auto& dev_context =
            context.template device_context<platform::CPUDeviceContext>();
        framework::Tensor out_grad_tmp;
        framework::Tensor indices_tmp;
        out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
        indices_tmp.mutable_data<int64_t>(indices->dims(),
                                          dev_context.GetPlace());
        framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
                              &out_grad_tmp);
        framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
                              &indices_tmp);
        out_grad_tmp.Resize(out_dims);
        indices_tmp.Resize(out_dims);
        ModeAssign(input_height, input_width, in_dims.size(), &out_grad_tmp,
                   &indices_tmp, x_grad_data);
      }
    } else {
      // cannot assign grad to input_grad, must do the transpose
      std::vector<int> trans_axis;
      for (int i = 0; i < axis; i++) {
        trans_axis.emplace_back(i);
      }
      trans_axis.emplace_back(out_dims.size() - 1);
      for (int i = axis + 1; i < out_dims.size() - 1; i++) {
        trans_axis.emplace_back(i);
      }
      trans_axis.emplace_back(axis);

      framework::DDim trans_shape(out_dims);
      framework::DDim trans_in_shape(in_dims);
      for (size_t i = 0; i < trans_axis.size(); i++) {
        trans_shape[i] = out_dims[trans_axis[i]];
        trans_in_shape[i] = in_dims[trans_axis[i]];
      }
      // transpose the out_grad, indices
      framework::Tensor trans_dO;
      trans_dO.mutable_data<T>(trans_shape, context.GetPlace());
      framework::Tensor trans_ind;
      trans_ind.mutable_data<int64_t>(trans_shape, context.GetPlace());
      int ndims = trans_axis.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();

      if (keepdim) {
        // Do transpose
        TransCompute<platform::CPUDeviceContext, T>(
            ndims, dev_context, *out_grad, &trans_dO, trans_axis);
        TransCompute<platform::CPUDeviceContext, int64_t>(
            ndims, dev_context, *indices, &trans_ind, trans_axis);
      } else {
        framework::Tensor out_grad_tmp;
        framework::Tensor indices_tmp;
        out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
        indices_tmp.mutable_data<int64_t>(indices->dims(),
                                          dev_context.GetPlace());
        framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
                              &out_grad_tmp);
        framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
                              &indices_tmp);
        out_grad_tmp.Resize(out_dims);
        indices_tmp.Resize(out_dims);
        // Do transpose
        TransCompute<platform::CPUDeviceContext, T>(
            ndims, dev_context, out_grad_tmp, &trans_dO, trans_axis);
        TransCompute<platform::CPUDeviceContext, int64_t>(
            ndims, dev_context, indices_tmp, &trans_ind, trans_axis);
      }
      const int64_t input_height = phi::product(
          phi::slice_ddim(trans_in_shape, 0, trans_in_shape.size() - 1));
      const int64_t input_width = trans_in_shape[trans_in_shape.size() - 1];

      // Assign the out_grad to the transposed input_grad
      framework::Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_in_shape, context.GetPlace());
      memset(t_out, 0, x_grad->numel() * sizeof(T));

      ModeAssign<T, int64_t>(input_height, input_width, in_dims.size(),
                             &trans_dO, &trans_ind, t_out);

      // Transpose back
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  x_grad, trans_axis);
    }
  }
};

}  // namespace operators
}  // namespace paddle
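getMode() above reduces each row to its most frequent value by sorting (value, original-index) pairs and scanning runs of equal values; ties keep the first (smallest) value because the frequency comparison is strict. A standalone C sketch of that scan (hypothetical, not Paddle code, and ignoring the NaN handling in the real comparator):

#include <stdio.h>
#include <stdlib.h>

typedef struct { float value; long index; } pair_t;

static int cmp_pair(const void *a, const void *b)
{
    float x = ((const pair_t *)a)->value, y = ((const pair_t *)b)->value;
    return (x > y) - (x < y);
}

int main(void)
{
    float row[] = { 2.f, 1.f, 2.f, 3.f, 1.f, 2.f };
    long n = 6, j;
    pair_t col_vec[6];
    float mode = 0.f;
    long indice = 0, cur_freq = 0, max_freq = 0;

    for (j = 0; j < n; j++) { col_vec[j].value = row[j]; col_vec[j].index = j; }
    qsort(col_vec, n, sizeof(pair_t), cmp_pair);

    /* walk runs of equal values; the first run with the highest count wins */
    for (j = 0; j < n; j++) {
        ++cur_freq;
        if (j == n - 1 || col_vec[j + 1].value != col_vec[j].value) {
            if (cur_freq > max_freq) {
                max_freq = cur_freq;
                mode = col_vec[j].value;
                indice = col_vec[j].index;
            }
            cur_freq = 0;
        }
    }
    printf("mode=%g index=%ld\n", mode, indice);  /* mode=2 index=5 */
    return 0;
}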
matProduct1.c
/******************************************************************************
* FILE: omp_mm.c
* DESCRIPTION:
*   OpenMP Example - Matrix Multiply - C Version
*   Demonstrates a matrix multiply using OpenMP. Threads share row iterations
*   according to a predefined chunk size.
* AUTHOR: Blaise Barney
* LAST REVISED: 06/28/05
******************************************************************************/
/**
 * This program performs the multiplication of two matrices.
 * Online source:
 * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_mm.c
 **/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef _CIVL
$input int NRA=5; /* number of rows in matrix A */
$input int NCA=5; /* number of columns in matrix A */
$input int NCB=5; /* number of columns in matrix B */
#else
#define NRA 8 /* number of rows in matrix A */
#define NCA 8 /* number of columns in matrix A */
#define NCB 8 /* number of columns in matrix B */
#endif

int main (int argc, char *argv[])
{
  int tid, nthreads, i, j, k, chunk;
  double a[NRA][NCA], /* matrix A to be multiplied */
         b[NCA][NCB], /* matrix B to be multiplied */
         c[NRA][NCB]; /* result matrix C */

  chunk = 10;         /* set loop iteration chunk size */

  /*** Spawn a parallel region explicitly scoping all variables ***/
#pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,i,j,k)
  {
    tid = omp_get_thread_num();
    if (tid == 0)
    {
      nthreads = omp_get_num_threads();
      printf("Starting matrix multiply example with %d threads\n",nthreads);
      printf("Initializing matrices...\n");
    }
    /*** Initialize matrices ***/
#pragma omp for schedule (static, chunk)
    for (i=0; i<NRA; i++)
      for (j=0; j<NCA; j++)
        a[i][j]= i+j;
#pragma omp for schedule (static, chunk)
    for (i=0; i<NCA; i++)
      for (j=0; j<NCB; j++)
        b[i][j]= i*j;
#pragma omp for schedule (static, chunk)
    for (i=0; i<NRA; i++)
      for (j=0; j<NCB; j++)
        c[i][j]= 0;
    /*** Do matrix multiply sharing iterations on outer loop ***/
    /*** Display who does which iterations for demonstration purposes ***/
    printf("Thread %d starting matrix multiply...\n",tid);
#pragma omp for schedule (static, chunk)
    for (i=0; i<NRA; i++)
    {
      printf("Thread=%d did row=%d\n",tid,i);
      for(j=0; j<NCB; j++)
        for (k=0; k<NCA; k++)
          c[i][j] += a[i][k] * b[k][j];
    }
  } /*** End of parallel region ***/

  /*** Print results ***/
  printf("******************************************************\n");
  printf("Result Matrix:\n");
  for (i=0; i<NRA; i++)
  {
    for (j=0; j<NCB; j++)
      printf("%6.2f ", c[i][j]);
    printf("\n");
  }
  printf("******************************************************\n");
  printf ("Done.\n");
  return 0;
}
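With the initializations above (a[i][k] = i+k, b[k][j] = k*j), the product has a closed form: c[i][j] = sum_k (i+k)*(k*j) = j*(i*sum_k k + sum_k k*k), which for NCA == 8 is j*(28*i + 140) since sum_k k = 28 and sum_k k*k = 140 over k = 0..7. A hypothetical self-check built on that identity, valid for the #else macro branch (exact in double, since all intermediate values are small integers); it would be called as check_result(c) after the parallel region:

#include <assert.h>

/* verify the OpenMP result against the closed form, valid when
   NRA == NCA == NCB == 8 */
static void check_result(double c[NRA][NCB])
{
    int i, j;
    for (i = 0; i < NRA; i++)
        for (j = 0; j < NCB; j++)
            assert(c[i][j] == (double) j * (28.0 * i + 140.0));
}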
GB_unop__trunc_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__trunc_fp32_fp32 // op(A') function: GB_unop_tran__trunc_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = truncf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = truncf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = truncf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TRUNC || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__trunc_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = truncf (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__trunc_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
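A standalone driver sketch (hypothetical, not generated code) exercising the same elementwise pattern the kernel above applies: truncf() rounds toward zero, and the GB_CAST step is the identity for float-to-float:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    float Ax [4] = { 1.9f, -1.9f, 0.5f, -0.5f } ;
    float Cx [4] ;
    int64_t p, anz = 4 ;
    /* same loop body the generated kernel runs, parallelized the same way */
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;    /* GB_GETA */
        float z = aij ;         /* GB_CAST: float -> float is the identity */
        Cx [p] = truncf (z) ;   /* GB_OP */
    }
    for (p = 0 ; p < anz ; p++)
    {
        printf ("%g -> %g\n", (double) Ax [p], (double) Cx [p]) ;
    }
    /* prints: 1.9 -> 1, -1.9 -> -1, 0.5 -> 0, -0.5 -> -0 */
    return (0) ;
}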
nt2_fmt_plug.c
/* * NT-ng format, using intrinsics. * * This software is Copyright 2011, 2012 magnum, and it is hereby released to * the general public under the following terms: Redistribution and use in * source and binary forms, with or without modification, are permitted. * * Losely based on rawSHA1, by bartavelle * and is also using his mmx/sse2/simd-intrinsics functions * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_NT2; #elif FMT_REGISTERS_H john_register_one(&fmt_NT2); #else #include <string.h> #include "arch.h" //#undef SIMD_COEF_32 //#undef SIMD_PARA_MD4 /* * Only effective for SIMD. * Undef to disable reversing steps for benchmarking. */ #define REVERSE_STEPS #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4) #endif #include "md4.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "unicode.h" #include "memory.h" #include "johnswap.h" #include "simd-intrinsics.h" #include "memdbg.h" #define FORMAT_LABEL "NT" #define FORMAT_NAME "" #define ALGORITHM_NAME "MD4 " MD4_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define CIPHERTEXT_LENGTH 32 #define FORMAT_TAG "$NT$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define DIGEST_SIZE 16 #define BINARY_SIZE DIGEST_SIZE #define BINARY_ALIGN 4 #define SALT_SIZE 0 #define SALT_ALIGN 1 #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef SIMD_COEF_32 #if defined(_OPENMP) #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 512 // tuned for i7 w/o HT #endif #endif #define PLAINTEXT_LENGTH 27 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 ) #else #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static char *source(char *source, void *binary) { static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG; ARCH_WORD_32 b[4]; char *p; int i, j; memcpy(b, binary, sizeof(b)); #if SIMD_COEF_32 && defined(REVERSE_STEPS) md4_unreverse(b); #endif #if ARCH_LITTLE_ENDIAN==0 alter_endianity(b, 16); #endif p = &out[TAG_LENGTH]; for (i = 0; i < 4; i++) for (j = 0; j < 8; j++) *p++ = itoa16[(b[i] >> ((j ^ 1) * 4)) & 0xf]; return out; } #ifdef SIMD_COEF_32 static unsigned char (*saved_key); static unsigned char (*crypt_key); static unsigned int (**buf_ptr); #else static MD4_CTX ctx; static int saved_len; static UTF16 saved_key[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 crypt_key[DIGEST_SIZE / 4]; #endif // Note: the ISO-8859-1 plaintexts will be replaced in init() if running UTF-8 static struct fmt_tests tests[] = { {"b7e4b9022cd45f275334bbdb83bb5be5", "John the Ripper"}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$7a21990fcd3d759941e45c490f143d5f", "12345"}, {"$NT$f9e37e83b83c47a93c2f09f66408631b", "abc123"}, {"$NT$8846f7eaee8fb117ad06bdd830b7586c", "password"}, {"$NT$2b2ac2d1c7c8fda6cea80b5fad7563aa", "computer"}, {"$NT$32ed87bdb5fdc5e9cba88547376818d4", "123456"}, {"$NT$b7e0ea9fbffcf6dd83086e905089effd", "tigger"}, {"$NT$7ce21f17c0aee7fb9ceba532d0546ad6", "1234"}, {"$NT$b23a90d0aad9da3615fafc27a1b8baeb", "a1b2c3"}, {"$NT$2d20d252a479f485cdf5e171d93985bf", "qwerty"}, {"$NT$3dbde697d71690a769204beb12283678", "123"}, {"$NT$c889c75b7c1aae1f7150c5681136e70e", "xxx"}, 
{"$NT$d5173c778e0f56d9fc47e3b3c829aca7", "money"}, {"$NT$0cb6948805f797bf2a82807973b89537", "test"}, {"$NT$0569fcf2b14b9c7f3d3b5f080cbd85e5", "carmen"}, {"$NT$f09ab1733a528f430353834152c8a90e", "mickey"}, {"$NT$878d8014606cda29677a44efa1353fc7", "secret"}, {"$NT$85ac333bbfcbaa62ba9f8afb76f06268", "summer"}, {"$NT$5962cc080506d90be8943118f968e164", "internet"}, {"$NT$f07206c3869bda5acd38a3d923a95d2a", "service"}, {"$NT$d0dfc65e8f286ef82f6b172789a0ae1c", "canada"}, {"$NT$066ddfd4ef0e9cd7c256fe77191ef43c", "hello"}, {"$NT$39b8620e745b8aa4d1108e22f74f29e2", "ranger"}, {"$NT$8d4ef8654a9adc66d4f628e94f66e31b", "shadow"}, {"$NT$320a78179516c385e35a93ffa0b1c4ac", "baseball"}, {"$NT$e533d171ac592a4e70498a58b854717c", "donald"}, {"$NT$5eee54ce19b97c11fd02e531dd268b4c", "harley"}, {"$NT$6241f038703cbfb7cc837e3ee04f0f6b", "hockey"}, {"$NT$becedb42ec3c5c7f965255338be4453c", "letmein"}, {"$NT$ec2c9f3346af1fb8e4ee94f286bac5ad", "maggie"}, {"$NT$f5794cbd75cf43d1eb21fad565c7e21c", "mike"}, {"$NT$74ed32086b1317b742c3a92148df1019", "mustang"}, {"$NT$63af6e1f1dd9ecd82f17d37881cb92e6", "snoopy"}, {"$NT$58def5844fe58e8f26a65fff9deb3827", "buster"}, {"$NT$f7eb9c06fafaa23c4bcf22ba6781c1e2", "dragon"}, {"$NT$dd555241a4321657e8b827a40b67dd4a", "jordan"}, {"$NT$bb53a477af18526ada697ce2e51f76b3", "michael"}, {"$NT$92b7b06bb313bf666640c5a1e75e0c18", "michelle"}, {NULL} }; static void set_key_utf8(char *_key, int index); static void set_key_CP(char *_key, int index); static void init(struct fmt_main *self) { #if SIMD_COEF_32 int i; #endif #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif if (options.target_enc == UTF_8) { /* This avoids an if clause for every set_key */ self->methods.set_key = set_key_utf8; #if SIMD_COEF_32 /* kick it up from 27. 
We will truncate in setkey_utf8() */ self->params.plaintext_length = 3 * PLAINTEXT_LENGTH; #endif tests[1].plaintext = "\xC3\xBC"; // German u-umlaut in UTF-8 tests[1].ciphertext = "$NT$8bd6e4fb88e01009818749c5443ea712"; tests[2].plaintext = "\xC3\xBC\xC3\xBC"; // two of them tests[2].ciphertext = "$NT$cc1260adb6985ca749f150c7e0b22063"; tests[3].plaintext = "\xE2\x82\xAC"; // euro sign tests[3].ciphertext = "$NT$030926b781938db4365d46adc7cfbcb8"; tests[4].plaintext = "\xE2\x82\xAC\xE2\x82\xAC"; tests[4].ciphertext = "$NT$682467b963bb4e61943e170a04f7db46"; } else { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) { /* This avoids an if clause for every set_key */ self->methods.set_key = set_key_CP; } if (CP_to_Unicode[0xfc] == 0x00fc) { tests[1].plaintext = "\xFC"; // German u-umlaut in UTF-8 tests[1].ciphertext = "$NT$8bd6e4fb88e01009818749c5443ea712"; tests[2].plaintext = "\xFC\xFC"; // two of them tests[2].ciphertext = "$NT$cc1260adb6985ca749f150c7e0b22063"; tests[3].plaintext = "\xFC\xFC\xFC"; // 3 of them tests[3].ciphertext = "$NT$2e583e8c210fb101994c19877ac53b89"; tests[4].plaintext = "\xFC\xFC\xFC\xFC"; tests[4].ciphertext = "$NT$243bb98e7704797f92b1dd7ded6da0d0"; } } #if SIMD_COEF_32 saved_key = mem_calloc_align(64 * self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(DIGEST_SIZE * self->params.max_keys_per_crypt, sizeof(*crypt_key), MEM_ALIGN_SIMD); buf_ptr = mem_calloc(self->params.max_keys_per_crypt, sizeof(*buf_ptr)); for (i=0; i<self->params.max_keys_per_crypt; i++) buf_ptr[i] = (unsigned int*)&saved_key[GETPOS(0, i)]; #endif } static void done(void) { #if SIMD_COEF_32 MEM_FREE(buf_ptr); MEM_FREE(crypt_key); MEM_FREE(saved_key); #endif } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[37]; if (!strncmp(ciphertext,FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpy(&out[TAG_LENGTH], ciphertext, 32); out[36] = 0; strlwr(&out[TAG_LENGTH]); return out; } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; for (pos = ciphertext; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++); if (!*pos && pos - ciphertext == CIPHERTEXT_LENGTH) return 1; else return 0; } // here to 'handle' the pwdump files: user:uid:lmhash:ntlmhash::: // Note, we address the user id inside loader. 
static char *prepare(char *split_fields[10], struct fmt_main *self) { static char out[33+TAG_LENGTH+1]; if (!valid(split_fields[1], self)) { if (split_fields[3] && strlen(split_fields[3]) == 32) { sprintf(out, "%s%s", FORMAT_TAG, split_fields[3]); if (valid(out,self)) return out; } } return split_fields[1]; } static void *get_binary(char *ciphertext) { static union { unsigned long dummy; unsigned int i[DIGEST_SIZE/sizeof(unsigned int)]; } _out; unsigned int *out = _out.i; unsigned int i; unsigned int temp; ciphertext += TAG_LENGTH; for (i=0; i<4; i++) { temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])])); temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24; #if ARCH_LITTLE_ENDIAN out[i]=temp; #else out[i]=JOHNSWAP(temp); #endif } #if SIMD_COEF_32 && defined(REVERSE_STEPS) md4_reverse(out); #endif //dump_stuff_msg("\nbinary", out, 16); return out; } // ISO-8859-1 to UCS-2, directly into vector key buffer static void set_key(char *_key, int index) { #ifdef SIMD_COEF_32 const unsigned char *key = (unsigned char*)_key; unsigned int *keybuf_word = buf_ptr[index]; unsigned int len, temp2; len = 0; while((temp2 = *key++)) { unsigned int temp; if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1) { temp2 |= (temp << 16); *keybuf_word = temp2; } else { temp2 |= (0x80 << 16); *keybuf_word = temp2; len++; goto key_cleaning; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else #if ARCH_LITTLE_ENDIAN UTF8 *s = (UTF8*)_key; UTF16 *d = saved_key; while (*s) *d++ = *s++; *d = 0; saved_len = (int)((char*)d - (char*)saved_key); #else UTF8 *s = (UTF8*)_key; UTF8 *d = (UTF8*)saved_key; while (*s) { *d++ = *s++; ++d; } *d = 0; saved_len = (int)((char*)d - (char*)saved_key); #endif // dump_stuff_msg(_key, saved_key, 24); #endif } // Legacy codepage to UCS-2, directly into vector key buffer static void set_key_CP(char *_key, int index) { #ifdef SIMD_COEF_32 const unsigned char *key = (unsigned char*)_key; unsigned int *keybuf_word = buf_ptr[index]; unsigned int len, temp2; len = 0; while((temp2 = *key++)) { unsigned int temp; temp2 = CP_to_Unicode[temp2]; if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1) { temp = CP_to_Unicode[temp]; temp2 |= (temp << 16); *keybuf_word = temp2; } else { temp2 |= (0x80 << 16); *keybuf_word = temp2; len++; goto key_cleaning_enc; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning_enc: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else saved_len = enc_to_utf16((UTF16*)&saved_key, PLAINTEXT_LENGTH + 1, (unsigned char*)_key, strlen(_key)) << 1; if (saved_len < 0) saved_len = strlen16(saved_key); #endif } // UTF-8 to UCS-2, directly into vector key buffer static void set_key_utf8(char 
*_key, int index) { #ifdef SIMD_COEF_32 const UTF8 *source = (UTF8*)_key; unsigned int *keybuf_word = buf_ptr[index]; UTF32 chl, chh = 0x80; unsigned int len = 0; while (*source) { chl = *source; if (chl >= 0xC0) { unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f]; switch (extraBytesToRead) { #if NT_FULL_UNICODE case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; #endif case 2: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 1: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 0: break; default: goto bailout; } chl -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; #if NT_FULL_UNICODE if (chl > UNI_MAX_BMP) { if (len == PLAINTEXT_LENGTH) { chh = 0x80; *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; break; } #define halfBase 0x0010000UL #define halfShift 10 #define halfMask 0x3FFUL #define UNI_SUR_HIGH_START (UTF32)0xD800 #define UNI_SUR_LOW_START (UTF32)0xDC00 chl -= halfBase; chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);; chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START); len++; } else #endif if (*source && len < PLAINTEXT_LENGTH) { chh = *source; if (chh >= 0xC0) { unsigned int extraBytesToRead = opt_trailingBytesUTF8[chh & 0x3f]; switch (extraBytesToRead) { #if NT_FULL_UNICODE case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; #endif case 2: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 1: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 0: break; default: goto bailout; } chh -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; } else { chh = 0x80; *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; break; } *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; } if (chh != 0x80 || len == 0) { *keybuf_word = 0x80; keybuf_word += SIMD_COEF_32; } bailout: while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else saved_len = utf8_to_utf16((UTF16*)&saved_key, PLAINTEXT_LENGTH + 1, (unsigned char*)_key, strlen(_key)) << 1; if (saved_len < 0) saved_len = strlen16(saved_key); #endif } static char *get_key(int index) { #ifdef SIMD_COEF_32 // Get the key back from the key buffer, from UCS-2 unsigned int *keybuffer = (unsigned int*)&saved_key[GETPOS(0, index)]; static UTF16 key[PLAINTEXT_LENGTH + 1]; unsigned int md4_size=0; unsigned int i=0; for(; md4_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md4_size++) { key[md4_size] = keybuffer[i]; key[md4_size+1] = keybuffer[i] >> 16; if (key[md4_size] == 0x80 && key[md4_size+1] == 0) { key[md4_size] = 0; break; } ++md4_size; if (key[md4_size] == 0x80 && ((keybuffer[i+SIMD_COEF_32]&0xFFFF) == 0 || md4_size == PLAINTEXT_LENGTH)) { key[md4_size] = 0; break; } } return (char*)utf16_to_enc(key); #else return (char*)utf16_to_enc(saved_key); #endif } #ifndef REVERSE_STEPS #undef SSEi_REVERSE_STEPS #define SSEi_REVERSE_STEPS 0 #endif static int crypt_all(int *pcount, struct db_salt *salt) { #ifdef SIMD_COEF_32 int i = 0; #ifdef _OPENMP const unsigned int count = (*pcount + NBKEYS - 1) / NBKEYS; #pragma omp parallel for for (i = 0; i < count; i++) #endif SIMDmd4body(&saved_key[i*NBKEYS*64], (unsigned int*)&crypt_key[i*NBKEYS*DIGEST_SIZE], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN); #else MD4_Init( &ctx ); MD4_Update(&ctx, (unsigned char*)saved_key, saved_len); 
MD4_Final((unsigned char*) crypt_key, &ctx); #endif return *pcount; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x, y; #ifdef _OPENMP const unsigned int c = (count + SIMD_COEF_32 - 1) / SIMD_COEF_32; #else const unsigned int c = SIMD_PARA_MD4; #endif for(y = 0; y < c; y++) for(x = 0; x < SIMD_COEF_32; x++) { if( ((ARCH_WORD_32*)binary)[1] == ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+x+SIMD_COEF_32] ) return 1; } return 0; #else return !memcmp(binary, crypt_key, BINARY_SIZE); #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x = index&(SIMD_COEF_32-1); unsigned int y = (unsigned int)index/SIMD_COEF_32; return ((ARCH_WORD_32*)binary)[1] == ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4+SIMD_COEF_32]; #else return !memcmp(binary, crypt_key, BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { #ifdef SIMD_COEF_32 ARCH_WORD_32 crypt_key[DIGEST_SIZE / 4]; UTF16 u16[PLAINTEXT_LENGTH + 1]; MD4_CTX ctx; UTF8 *key = (UTF8*)get_key(index); int len = enc_to_utf16(u16, PLAINTEXT_LENGTH, key, strlen((char*)key)); if (len <= 0) len = strlen16(u16); MD4_Init(&ctx); MD4_Update(&ctx, u16, len << 1); MD4_Final((void*)crypt_key, &ctx); #ifdef REVERSE_STEPS md4_reverse(crypt_key); #endif return !memcmp(get_binary(source), crypt_key, DIGEST_SIZE); #else return 1; #endif } #ifdef SIMD_COEF_32 #define SIMD_INDEX (index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4+SIMD_COEF_32 static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_0; } static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_1; } static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_2; } static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_3; } static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_4; } static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_5; } static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_6; } #else static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[1] & PH_MASK_0; } static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[1] & PH_MASK_1; } static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[1] & PH_MASK_2; } static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[1] & PH_MASK_3; } static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[1] & PH_MASK_4; } static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[1] & PH_MASK_5; } static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[1] & PH_MASK_6; } #endif static int binary_hash_0(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_0; } static int binary_hash_1(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_1; } static int binary_hash_2(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_2; } static int binary_hash_3(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_3; } static int binary_hash_4(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_4; } static int binary_hash_5(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_5; } static int binary_hash_6(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_6; } struct fmt_main fmt_NT2 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, 
BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, fmt_default_salt, { NULL }, source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
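/* Supplementary sketch (hypothetical, standalone; not part of the plugin
 * above): the scalar path of crypt_all() reduces an NT hash to MD4 over the
 * UTF-16LE encoding of the password. This version assumes a libcrypto that
 * still ships the legacy MD4() one-shot, and handles ASCII input only (each
 * byte widened to a little-endian UTF-16 code unit), mirroring the
 * ISO-8859-1 set_key() path; the length cap of 27 is an assumed value. */
#include <stdio.h>
#include <string.h>
#include <openssl/md4.h>

static void nt_hash_ascii(const char *pw, unsigned char digest[16])
{
    unsigned char u16[2 * 27];
    size_t i, len = strlen(pw);
    if (len > 27) len = 27;            /* truncate; the cap is assumed */
    for (i = 0; i < len; i++) {        /* widen to UTF-16LE */
        u16[2 * i]     = (unsigned char)pw[i];
        u16[2 * i + 1] = 0;
    }
    MD4(u16, 2 * len, digest);
}

int main(void)
{
    unsigned char d[16];
    int i;
    nt_hash_ascii("password", d);
    for (i = 0; i < 16; i++) printf("%02x", d[i]);
    printf("\n");   /* expected: 8846f7eaee8fb117ad06bdd830b7586c */
    return 0;
}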
GB_binop__rdiv_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fp64) // A*D function (colscale): GB (_AxD__rdiv_fp64) // D*A function (rowscale): GB (_DxB__rdiv_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fp64) // C=scalar+B GB (_bind1st__rdiv_fp64) // C=scalar+B' GB (_bind1st_tran__rdiv_fp64) // C=A+scalar GB (_bind2nd__rdiv_fp64) // C=A'+scalar GB (_bind2nd_tran__rdiv_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = (bij / aij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y / x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_FP64 || GxB_NO_RDIV_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (bij / x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (y / aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij / x) ; \ } GrB_Info GB (_bind1st_tran__rdiv_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y / aij) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
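/* Supplementary note (a hypothetical standalone check, not GraphBLAS code):
 * RDIV is division with its operands flipped, z = y / x. So bind1st, which
 * binds x to the scalar, computes Bx[p] / x, and bind2nd, which binds y,
 * computes y / Ax[p]: exactly the two loops above. */
#include <assert.h>

static double rdiv(double x, double y) { return y / x; }

int main(void)
{
    assert(rdiv(2.0, 10.0) == 5.0);   /* bind1st, scalar x = 2: bij / 2 */
    assert(rdiv(10.0, 2.0) == 0.2);   /* bind2nd, scalar y = 2: 2 / aij */
    return 0;
}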
GB_iso_expand.c
//------------------------------------------------------------------------------ // GB_iso_expand: expand a scalar into an entire array //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" void GB_iso_expand // expand an iso scalar into an entire array ( void *restrict X, // output array to expand into int64_t n, // # of entries in X void *restrict scalar, // scalar to expand into X size_t size, // size of the scalar and each entry of X GB_Context Context ) { //-------------------------------------------------------------------------- // determine how many threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (n, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // copy the value into X //-------------------------------------------------------------------------- int64_t p ; switch (size) { case GB_1BYTE : // bool, uint8, int8, and UDT of size 1 { uint8_t a0 = (*((uint8_t *) scalar)) ; uint8_t *restrict Z = (uint8_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_2BYTE : // uint16, int16, and UDT of size 2 { uint16_t a0 = (*((uint16_t *) scalar)) ; uint16_t *restrict Z = (uint16_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_4BYTE : // uint32, int32, float, and UDT of size 4 { uint32_t a0 = (*((uint32_t *) scalar)) ; uint32_t *restrict Z = (uint32_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_8BYTE : // uint64, int64, double, float complex, UDT size 8 { uint64_t a0 = (*((uint64_t *) scalar)) ; uint64_t *restrict Z = (uint64_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_16BYTE : // double complex, and UDT size 16 { GB_blob16 a0 = (*((GB_blob16 *) scalar)) ; GB_blob16 *restrict Z = (GB_blob16 *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; default : // user-defined types of arbitrary size { GB_void *restrict Z = (GB_void *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { memcpy (Z + p*size, scalar, size) ; } } break ; } }
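/* Supplementary sketch (hypothetical, standalone; not part of
 * GB_iso_expand.c): the switch above exists so that the common sizes expand
 * with one typed store per entry instead of one memcpy per entry, which
 * only the default case pays. A reduced version of the 8-byte case: */
#include <stdint.h>
#include <string.h>

static void expand_8byte(void *X, int64_t n, const void *scalar)
{
    uint64_t a0 ;
    memcpy (&a0, scalar, sizeof (a0)) ;    /* read the scalar once */
    uint64_t *Z = (uint64_t *) X ;
    for (int64_t p = 0 ; p < n ; p++)
    {
        Z [p] = a0 ;                       /* one 8-byte store per entry */
    }
}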
omp3.c
#include <stdio.h>
#include <omp.h>   /* required for the omp_* calls below */

int main() {
    int i;
    /* The original code called omp_set_num_threads() with no argument,
       which does not compile; 4 is an assumed, illustrative team size. */
    omp_set_num_threads(4);
    #pragma omp parallel for
    for (i = 0; i <= 15; i++) {
        if (omp_get_thread_num() == 0) {
            printf("%d\n", omp_get_num_procs());
            printf("%d\n", omp_get_num_threads());
        }
    }
    return 0;
}
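/* Supplementary sketch (hypothetical, not part of omp3.c above): the two
 * queries it prints behave differently. omp_get_num_procs() reports the
 * processors available to the program regardless of context, while
 * omp_get_num_threads() returns 1 outside a parallel region and the team
 * size inside one. */
#include <stdio.h>
#include <omp.h>

int main() {
    printf("outside: %d thread(s)\n", omp_get_num_threads());  /* always 1 */
    #pragma omp parallel num_threads(2)
    {
        #pragma omp single
        printf("inside:  %d thread(s)\n", omp_get_num_threads());  /* 2 */
    }
    return 0;
}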
is.c
/************************************************************************* * * * N A S P A R A L L E L B E N C H M A R K S 3.3 * * * * O p e n M P V E R S I O N * * * * I S * * * ************************************************************************* * * * This benchmark is an OpenMP version of the NPB IS code. * * It is described in NAS Technical Report 99-011. * * * * Permission to use, copy, distribute and modify this software * * for any purpose with or without fee is hereby granted. We * * request, however, that all derived work reference the NAS * * Parallel Benchmarks 3.3. This software is provided "as is" * * without express or implied warranty. * * * * Information on NPB 3.3, including the technical report, the * * original specifications, source code, results and information * * on how to submit new results, is available at: * * * * http://www.nas.nasa.gov/Software/NPB/ * * * * Send comments or suggestions to npb@nas.nasa.gov * * * * NAS Parallel Benchmarks Group * * NASA Ames Research Center * * Mail Stop: T27A-1 * * Moffett Field, CA 94035-1000 * * * * E-mail: npb@nas.nasa.gov * * Fax: (650) 604-3957 * * * ************************************************************************* * * * Author: M. Yarrow * * H. Jin * * * *************************************************************************/ #include "npbparams.h" #include <stdlib.h> #include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif #include "../my_include/my_include.h" /*****************************************************************/ /* For serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. */ /* (Note: Mechanism not understood, probably cache related) */ /* Example: SP2-66MhzWN: 50% speedup with buckets */ /* Example: SGI Indy5000: 50% slowdown with buckets */ /* Example: SGI O2000: 400% slowdown with buckets (Wow!) 
*/ /*****************************************************************/ /* To disable the use of buckets, comment out the following line */ #define USE_BUCKETS /* Uncomment below for cyclic schedule */ /*#define SCHED_CYCLIC*/ /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif /*************/ /* CLASS S */ /*************/ #if CLASS == 'S' #define TOTAL_KEYS_LOG_2 16 #define MAX_KEY_LOG_2 11 #define NUM_BUCKETS_LOG_2 9 #endif /*************/ /* CLASS W */ /*************/ #if CLASS == 'W' #define TOTAL_KEYS_LOG_2 20 #define MAX_KEY_LOG_2 16 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS A */ /*************/ #if CLASS == 'A' #define TOTAL_KEYS_LOG_2 23 #define MAX_KEY_LOG_2 19 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS B */ /*************/ #if CLASS == 'B' #define TOTAL_KEYS_LOG_2 25 #define MAX_KEY_LOG_2 21 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS C */ /*************/ #if CLASS == 'C' #define TOTAL_KEYS_LOG_2 27 #define MAX_KEY_LOG_2 23 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS D */ /*************/ #if CLASS == 'D' #define TOTAL_KEYS_LOG_2 31 #define MAX_KEY_LOG_2 27 #define NUM_BUCKETS_LOG_2 10 #endif #if CLASS == 'D' #define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2) #else #define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2) #endif #define MAX_KEY (1 << MAX_KEY_LOG_2) #define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2) #define NUM_KEYS TOTAL_KEYS #define SIZE_OF_BUFFERS NUM_KEYS #define MAX_ITERATIONS 10 #define TEST_ARRAY_SIZE 5 /*************************************/ /* Typedef: if necessary, change the */ /* size of int here by changing the */ /* int type to, say, long */ /*************************************/ #if CLASS == 'D' typedef long INT_TYPE; #else typedef int INT_TYPE; #endif /********************/ /* Some global info */ /********************/ INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */ /* copies of rank info */ int passed_verification; /************************************/ /* These are the three main arrays. 
*/ /* See SIZE_OF_BUFFERS def above */ /************************************/ INT_TYPE key_array[SIZE_OF_BUFFERS], key_buff1[MAX_KEY], key_buff2[SIZE_OF_BUFFERS], partial_verify_vals[TEST_ARRAY_SIZE], **key_buff1_aptr = NULL; #ifdef USE_BUCKETS //INT_TYPE **bucket_size, INT_TYPE bucket_ptrs[NUM_BUCKETS]; //bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS); INT_TYPE bucket_size[1][NUM_BUCKETS]; #pragma omp threadprivate(bucket_ptrs) #endif //kai int flag1 = 0, flag2 = 0, flag3 = 0, flag4 = 0, flag5= 0, flag6 = 0, flag7 = 0, flag8 = 0; //INT_TYPE *kw_bucket_size; /**********************/ /* Partial verif info */ /**********************/ INT_TYPE test_index_array[TEST_ARRAY_SIZE], test_rank_array[TEST_ARRAY_SIZE], S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431}, S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463}, W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505}, W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018}, A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760}, A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264}, B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214}, B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99}, C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814}, C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895}, D_test_index_array[TEST_ARRAY_SIZE] = {1317351170,995930646,1157283250,1503301535,1453734525}, D_test_rank_array[TEST_ARRAY_SIZE] = {1,36538729,1978098519,2145192618,2147425337}; /***********************/ /* function prototypes */ /***********************/ double randlc( double *X, double *A ); void full_verify( void ); void c_print_results( char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags ); void timer_clear( int n ); void timer_start( int n ); void timer_stop( int n ); double timer_read( int n ); /* * FUNCTION RANDLC (X, A) * * This routine returns a uniform pseudorandom double precision number in the * range (0, 1) by using the linear congruential generator * * x_{k+1} = a x_k (mod 2^46) * * where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers * before repeating. The argument A is the same as 'a' in the above formula, * and X is the same as x_0. A and X must be odd double precision integers * in the range (1, 2^46). The returned value RANDLC is normalized to be * between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain * the new seed x_1, so that subsequent calls to RANDLC using the same * arguments will generate a continuous sequence. * * This routine should produce the same results on any computer with at least * 48 mantissa bits in double precision floating point data. On Cray systems, * double precision should be disabled. * * David H. Bailey October 26, 1990 * * IMPLICIT DOUBLE PRECISION (A-H, O-Z) * SAVE KS, R23, R46, T23, T46 * DATA KS/0/ * * If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46, * T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than * by merely using the ** operator, in order to insure that the results are * exact on all systems. This code assumes that 0.5D0 is represented exactly. 
*/ /*****************************************************************/ /************* R A N D L C ************/ /************* ************/ /************* portable random number generator ************/ /*****************************************************************/ static int KS=0; static double R23, R46, T23, T46; #pragma omp threadprivate(KS, R23, R46, T23, T46) double randlc( double *X, double *A ) { double T1, T2, T3, T4; double A1; double A2; double X1; double X2; double Z; int i, j; if (KS == 0) { R23 = 1.0; R46 = 1.0; T23 = 1.0; T46 = 1.0; for (i=1; i<=23; i++) { R23 = 0.50 * R23; T23 = 2.0 * T23; } for (i=1; i<=46; i++) { R46 = 0.50 * R46; T46 = 2.0 * T46; } KS = 1; } /* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */ T1 = R23 * *A; j = T1; A1 = j; A2 = *A - T23 * A1; /* Break X into two parts such that X = 2^23 * X1 + X2, compute Z = A1 * X2 + A2 * X1 (mod 2^23), and then X = 2^23 * Z + A2 * X2 (mod 2^46). */ T1 = R23 * *X; j = T1; X1 = j; X2 = *X - T23 * X1; T1 = A1 * X2 + A2 * X1; j = R23 * T1; T2 = j; Z = T1 - T23 * T2; T3 = T23 * Z + A2 * X2; j = R46 * T3; T4 = j; *X = T3 - T46 * T4; return(R46 * *X); } /*****************************************************************/ /************ F I N D _ M Y _ S E E D ************/ /************ ************/ /************ returns parallel random number seq seed ************/ /*****************************************************************/ /* * Create a random number sequence of total length nn residing * on np number of processors. Each processor will therefore have a * subsequence of length nn/np. This routine returns that random * number which is the first random number for the subsequence belonging * to processor rank kn, and which is used as seed for proc kn ran # gen. 
*/ double find_my_seed( int kn, /* my processor rank, 0<=kn<=num procs */ int np, /* np = num procs */ long nn, /* total num of ran numbers, all procs */ double s, /* Ran num seed, for ex.: 314159265.00 */ double a ) /* Ran num gen mult, try 1220703125.00 */ { double t1,t2; long mq,nq,kk,ik; if ( kn == 0 ) return s; mq = (nn/4 + np - 1) / np; nq = mq * 4 * kn; /* number of rans to be skipped */ t1 = s; t2 = a; kk = nq; while ( kk > 1 ) { ik = kk / 2; if( 2 * ik == kk ) { (void)randlc( &t2, &t2 ); kk = ik; } else { (void)randlc( &t1, &t2 ); kk = kk - 1; } } (void)randlc( &t1, &t2 ); return( t1 ); } /*****************************************************************/ /************* C R E A T E _ S E Q ************/ /*****************************************************************/ void create_seq( double seed, double a ) { double x, s; INT_TYPE i, k; #pragma omp parallel private(x,s,i,k) { INT_TYPE k1, k2; double an = a; int myid, num_procs; INT_TYPE mq; #ifdef _OPENMP myid = omp_get_thread_num(); num_procs = omp_get_num_threads(); #else myid = 0; num_procs = 1; #endif mq = (NUM_KEYS + num_procs - 1) / num_procs; k1 = mq * myid; k2 = k1 + mq; if ( k2 > NUM_KEYS ) k2 = NUM_KEYS; KS = 0; s = find_my_seed( myid, num_procs, (long)4*NUM_KEYS, seed, an ); k = MAX_KEY/4; for (i=k1; i<k2; i++) { x = randlc(&s, &an); x += randlc(&s, &an); x += randlc(&s, &an); x += randlc(&s, &an); key_array[i] = k*x; } } /*omp parallel*/ } /*****************************************************************/ /***************** Allocate Working Buffer ****************/ /*****************************************************************/ void *alloc_mem( size_t size ) { void *p; p = (void *)malloc(size); if (!p) { perror("Memory allocation error"); exit(1); } return p; } void alloc_key_buff( void ) { INT_TYPE i; int num_procs; #ifdef _OPENMP num_procs = omp_get_max_threads(); #else num_procs = 1; #endif #ifdef USE_BUCKETS /* kw_bucket_size = (INT_TYPE*)alloc_mem(sizeof(INT)*NUM_BUCKETS*num_procs); //bucket_size = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs); for (i = 0; i < num_procs; i++) { //bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS); } */ #pragma omp parallel for for( i=0; i<NUM_KEYS; i++ ) key_buff2[i] = 0; //printf("111\n"); #else /*USE_BUCKETS*/ key_buff1_aptr = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs); key_buff1_aptr[0] = key_buff1; for (i = 1; i < num_procs; i++) { key_buff1_aptr[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * MAX_KEY); } //printf("2222\n"); #endif /*USE_BUCKETS*/ } /*****************************************************************/ /************* F U L L _ V E R I F Y ************/ /*****************************************************************/ void full_verify( void ) { INT_TYPE i, j; INT_TYPE k, k1, k2; /* Now, finally, sort the keys: */ /* Copy keys into work array; keys in key_array will be reassigned. */ #ifdef USE_BUCKETS /* Buckets are already sorted. Sorting keys within each bucket */ #ifdef SCHED_CYCLIC #pragma omp parallel for private(i,j,k,k1) schedule(static,1) #else #pragma omp parallel for private(i,j,k,k1) schedule(dynamic) #endif for( j=0; j< NUM_BUCKETS; j++ ) { k1 = (j > 0)? bucket_ptrs[j-1] : 0; for ( i = k1; i < bucket_ptrs[j]; i++ ) { k = --key_buff_ptr_global[key_buff2[i]]; key_array[k] = key_buff2[i]; } } #else #pragma omp parallel private(i,j,k,k1,k2) { #pragma omp for for( i=0; i<NUM_KEYS; i++ ) key_buff2[i] = key_array[i]; /* This is actual sorting. 
Each thread is responsible for a subset of key values */ j = omp_get_num_threads(); j = (MAX_KEY + j - 1) / j; k1 = j * omp_get_thread_num(); k2 = k1 + j; if (k2 > MAX_KEY) k2 = MAX_KEY; for( i=0; i<NUM_KEYS; i++ ) { if (key_buff2[i] >= k1 && key_buff2[i] < k2) { k = --key_buff_ptr_global[key_buff2[i]]; key_array[k] = key_buff2[i]; } } } /*omp parallel*/ #endif /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */ j = 0; #pragma omp parallel for reduction(+:j) for( i=1; i<NUM_KEYS; i++ ) if( key_array[i-1] > key_array[i] ) j++; if( j != 0 ) printf( "Full_verify: number of keys out of sort: %ld\n", (long)j ); else passed_verification++; } /*****************************************************************/ /************* R A N K ****************/ /*****************************************************************/ void rank( int iteration ) { flag1 = -1;flag2 = -1;flag3 = -1;flag4 = 0;flag5 = -1;flag6 = -1;flag7 = -1;flag8 = -1; INT_TYPE i, k; INT_TYPE *key_buff_ptr, *key_buff_ptr2; #ifdef USE_BUCKETS int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2; INT_TYPE num_bucket_keys = (1L << shift); #endif key_array[iteration] = iteration; key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration; // if(iteration == 2) printf("max flag1 = %d\n",TEST_ARRAY_SIZE); /* Determine where the partial verify test keys are, load into */ /* top of array bucket_size */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) { partial_verify_vals[i] = key_array[test_index_array[i]]; //kai // flag1 = i; } /* Setup pointers to key buffers */ #ifdef USE_BUCKETS key_buff_ptr2 = key_buff2; #else key_buff_ptr2 = key_array; #endif key_buff_ptr = key_buff1; #pragma omp parallel private(i, k) { INT_TYPE *work_buff, m, k1, k2; int myid = 0, num_procs = 1; #ifdef _OPENMP myid = omp_get_thread_num(); num_procs = omp_get_num_threads(); #endif /* Bucket sort is known to improve cache performance on some */ /* cache based systems. But the actual performance may depend */ /* on cache size, problem size. */ #ifdef USE_BUCKETS work_buff = bucket_size[myid]; /* Initialize */ for( i=0; i<NUM_BUCKETS; i++ ) work_buff[i] = 0; // if(iteration == 2) printf("max flag2 = %d\n",NUM_KEYS); /* Determine the number of keys in each bucket */ #pragma omp for schedule(static) for( i=0; i<NUM_KEYS; i++ ) { work_buff[key_array[i] >> shift]++; //kai //flag2 = i; } /* Accumulative bucket sizes are the bucket pointers. These are global sizes accumulated upon to each bucket */ // if(iteration == 2) printf("max flag3 = %d\n",myid); bucket_ptrs[0] = 0; for( k=0; k< myid; k++ ) { bucket_ptrs[0] += bucket_size[k][0]; //kai // flag3 = k; } //if(iteration == 2) printf("max flag4 = %d\n",NUM_BUCKETS); for( i=1; i< NUM_BUCKETS; i++ ) { bucket_ptrs[i] = bucket_ptrs[i-1]; for( k=0; k< myid; k++ ) { bucket_ptrs[i] += bucket_size[k][i]; } for( k=myid; k< num_procs; k++ ) { bucket_ptrs[i] += bucket_size[k][i-1]; } //kai flag4 = i; } //if(iteration == 2) printf("max flag5 = %d\n",NUM_KEYS); /* Sort into appropriate bucket */ #pragma omp for schedule(static) for( i=0; i<NUM_KEYS; i++ ) { k = key_array[i]; key_buff2[bucket_ptrs[k >> shift]++] = k; //kai flag5 = i; } //if(iteration == 2) printf("max flag6 = %d\n, myid = %d, num_procs-1 = %d\n",NUM_BUCKETS,myid,num_procs-1); /* The bucket pointers now point to the final accumulated sizes */ if (myid < num_procs-1) { for( i=0; i< NUM_BUCKETS; i++ ) { for( k=myid+1; k< num_procs; k++ ){ bucket_ptrs[i] += bucket_size[k][i]; } //kai //flag6 = i; } } /* Now, buckets are sorted. 
We only need to sort keys inside each bucket, which can be done in parallel. Because the distribution of the number of keys in the buckets is Gaussian, the use of a dynamic schedule should improve load balance, thus, performance */ //if(iteration == 2) printf("max flag7 = %d\n",NUM_BUCKETS); #ifdef SCHED_CYCLIC #pragma omp for schedule(static,1) #else #pragma omp for schedule(dynamic) #endif for( i=0; i< NUM_BUCKETS; i++ ) { /* Clear the work array section associated with each bucket */ k1 = i * num_bucket_keys; k2 = k1 + num_bucket_keys; for ( k = k1; k < k2; k++ ) { key_buff_ptr[k] = 0; } /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ m = (i > 0)? bucket_ptrs[i-1] : 0; for ( k = m; k < bucket_ptrs[i]; k++ ) { key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */ /* population */ //kai flag1 = k; } /* To obtain ranks of each key, successively add the individual key population, not forgetting to add m, the total of lesser keys, to the first key population */ key_buff_ptr[k1] += m; for ( k = k1+1; k < k2; k++ ) { key_buff_ptr[k] += key_buff_ptr[k-1]; flag2 = k; } //kai flag7 = i; } #else /*USE_BUCKETS*/ #endif /*USE_BUCKETS*/ } /*omp parallel*/ /* This is the partial verify test section */ /* Observe that test_rank_array vals are */ /* shifted differently for different cases */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) { k = partial_verify_vals[i]; /* test vals were put here */ if( 0 < k && k <= NUM_KEYS-1 ) { INT_TYPE key_rank = key_buff_ptr[k-1]; int failed = 0; switch( CLASS ) { case 'S': if( i <= 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'W': if( i < 2 ) { if( key_rank != test_rank_array[i]+(iteration-2) ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'A': if( i <= 2 ) { if( key_rank != test_rank_array[i]+(iteration-1) ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-(iteration-1) ) failed = 1; else passed_verification++; } break; case 'B': if( i == 1 || i == 2 || i == 4 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'C': if( i <= 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'D': if( i < 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; } if( failed == 1 ) { int ground_truth = 0; if( i <= 2 ) { ground_truth = test_rank_array[i]+(iteration-1); } else { ground_truth = key_rank != test_rank_array[i]-(iteration-1); } printf( "Failed partial verification: " "iteration %d, test key %d, our result %d, ground truth is %d\n", iteration, (int)i,key_rank,ground_truth ); } } //kai //flag8 = i; } /* Make copies of rank info for use by full_verify: these variables in rank are local; making them global slows down the code, probably since they cannot be made register by compiler */ if( iteration == 
MAX_ITERATIONS ) key_buff_ptr_global = key_buff_ptr; } /*****************************************************************/ /************* M A I N ****************/ /*****************************************************************/ int main( int argc, char **argv ) { printf("buffer size addr is %p\n",bucket_size); printf("buffer size[0] addr is %p\n",bucket_size[0]); //kai crucial_data(key_array, "int", SIZE_OF_BUFFERS); crucial_data(key_buff1, "int", MAX_KEY); crucial_data(key_buff2, "int",SIZE_OF_BUFFERS); crucial_data(partial_verify_vals, "int", TEST_ARRAY_SIZE); crucial_data(bucket_ptrs, "int", NUM_BUCKETS); crucial_data(bucket_size, "int", NUM_BUCKETS); consistent_data(&flag1, "int", 1); consistent_data(&flag2, "int", 1); consistent_data(&flag3, "int", 1); consistent_data(&flag4, "int", 1); consistent_data(&flag5, "int", 1); consistent_data(&flag6, "int", 1); consistent_data(&flag7, "int", 1); consistent_data(&flag8, "int", 1); int i, iteration, timer_on; double timecounter; FILE *fp; /* Initialize timers */ timer_on = 0; if ((fp = fopen("timer.flag", "r")) != NULL) { fclose(fp); timer_on = 1; } timer_clear( 0 ); if (timer_on) { timer_clear( 1 ); timer_clear( 2 ); timer_clear( 3 ); } if (timer_on) timer_start( 3 ); /* Initialize the verification arrays if a valid class */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) switch( CLASS ) { case 'S': test_index_array[i] = S_test_index_array[i]; test_rank_array[i] = S_test_rank_array[i]; break; case 'A': test_index_array[i] = A_test_index_array[i]; test_rank_array[i] = A_test_rank_array[i]; break; case 'W': test_index_array[i] = W_test_index_array[i]; test_rank_array[i] = W_test_rank_array[i]; break; case 'B': test_index_array[i] = B_test_index_array[i]; test_rank_array[i] = B_test_rank_array[i]; break; case 'C': test_index_array[i] = C_test_index_array[i]; test_rank_array[i] = C_test_rank_array[i]; break; case 'D': test_index_array[i] = D_test_index_array[i]; test_rank_array[i] = D_test_rank_array[i]; break; }; /* Printout initial NPB info */ printf ( "\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - IS Benchmark\n\n" ); printf( " Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS ); printf( " Iterations: %d\n", MAX_ITERATIONS ); #ifdef _OPENMP printf( " Number of available threads: %d\n", omp_get_max_threads() ); #endif printf( "\n" ); if (timer_on) timer_start( 1 ); /* Generate random number sequence and subsequent keys on all procs */ create_seq( 314159265.00, /* Random number gen seed */ 1220703125.00 ); /* Random number gen mult */ alloc_key_buff(); if (timer_on) timer_stop( 1 ); /* Do one interation for free (i.e., untimed) to guarantee initialization of all data and code pages and respective tables */ rank( 1 ); /* Start verification counter */ passed_verification = 0; if( CLASS != 'S' ) printf( "\n iteration\n" ); /* Start timer */ timer_start( 0 ); //kai consistent_data(&iteration, "int", 1); flush_whole_cache(); start_crash(); /* This is the main iteration */ for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ ) { if( CLASS != 'S' ) printf( " %d\n", iteration ); rank( iteration ); } //kai end_crash(); /* End of timing, obtain maximum time of all processors */ timer_stop( 0 ); timecounter = timer_read( 0 ); /* This tests that keys are in sequence: sorting of last ranked key seq occurs here, but is an untimed operation */ if (timer_on) timer_start( 2 ); full_verify(); if (timer_on) timer_stop( 2 ); if (timer_on) timer_stop( 3 ); /* The final printout */ if( passed_verification != 5*MAX_ITERATIONS + 1 ) passed_verification = 0; c_print_results( 
"IS", CLASS, (int)(TOTAL_KEYS/64), 64, 0, MAX_ITERATIONS, timecounter, ((double) (MAX_ITERATIONS*TOTAL_KEYS)) /timecounter/1000000., "keys ranked", passed_verification, NPBVERSION, COMPILETIME, CC, CLINK, C_LIB, C_INC, CFLAGS, CLINKFLAGS ); /* Print additional timers */ if (timer_on) { double t_total, t_percent; t_total = timer_read( 3 ); printf("\nAdditional timers -\n"); printf(" Total execution: %8.3f\n", t_total); if (t_total == 0.0) t_total = 1.0; timecounter = timer_read(1); t_percent = timecounter/t_total * 100.; printf(" Initialization : %8.3f (%5.2f%%)\n", timecounter, t_percent); timecounter = timer_read(0); t_percent = timecounter/t_total * 100.; printf(" Benchmarking : %8.3f (%5.2f%%)\n", timecounter, t_percent); timecounter = timer_read(2); t_percent = timecounter/t_total * 100.; printf(" Sorting : %8.3f (%5.2f%%)\n", timecounter, t_percent); } return 0; /**************************/ } /* E N D P R O G R A M */ /**************************/
chap_fmt_plug.c
/* iSCSI CHAP authentication cracker. Hacked together during September of 2012 * by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * Input Format : CHAP_N(username):$chap$id*challenge*response * * References: * * ftp://ftp.samba.org/pub/unpacked/ppp/pppd/chap-md5.c * http://www.blackhat.com/presentations/bh-usa-05/bh-us-05-Dwivedi-update.pdf * http://www.willhackforsushi.com/presentations/PEAP_Shmoocon2008_Wright_Antoniewicz.pdf */ #if FMT_EXTERNS_H extern struct fmt_main fmt_chap; #elif FMT_REGISTERS_H john_register_one(&fmt_chap); #else #include "md5.h" #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifdef __MIC__ #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 65536 // core i7 no HT #endif #endif #endif #include "memdbg.h" #define FORMAT_LABEL "chap" #define FORMAT_NAME "iSCSI CHAP authentication / EAP-MD5" #define FORMAT_TAG "$chap$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #define SALT_SIZE sizeof(struct custom_salt) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests chap_tests[] = { {"$chap$0*cc7e5247514551acdcbf782c4027bfb1*fdfdad5277812ae40956a66f3db23308", "password"}, {"$chap$0*81a49cb700e8c2ee9bc3852a506406c3*8876e228962a999637eecc2423f55f07", "password"}, {"$chap$0*e270954e7d84f99535dce2e5d7340a7d*4d64f587c7b5248406b939e1e9abeb74", "bar"}, // EAP-MD5 hashes are also supported! 
{"$chap$2*d7ec2fff2ada437f9dcd4e3b0df44d50*1ffc6c2659bc5bb94144fd01eb756e37", "beaVIs"}, {"$chap$2*00000000000000000000000000000000*9920418b3103652d3b80ffff04da5863", "bradtest"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { unsigned char id; /* CHAP_I */ unsigned char challenge[32]; /* CHAP_C */ int challenge_length; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int len, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "*")) == NULL) /* id */ goto err; if (!isdec(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* challenge */ goto err; len = strlen(p); if (len > 64 || (len&1)) goto err; if (hexlenl(p, &extra) != len || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* binary */ goto err; if (hexlenl(p, &extra) != BINARY_SIZE*2 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; ctcopy += FORMAT_TAG_LEN; /* skip over "$chap$" */ p = strtokm(ctcopy, "*"); cs.id = atoi(p); p = strtokm(NULL, "*"); cs.challenge_length = strlen(p) / 2; for (i = 0; i < cs.challenge_length; i++) cs.challenge[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; /* CHAP_R */ } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, &cur_salt->id, 1); MD5_Update(&ctx, saved_key[index], strlen(saved_key[index])); MD5_Update(&ctx, cur_salt->challenge, cur_salt->challenge_length); MD5_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (((uint32_t*)binary)[0] == 
crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void chap_set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_chap = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, chap_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, chap_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
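/* Supplementary sketch (hypothetical, standalone; not part of the plugin
 * above): crypt_all() shows that a CHAP/EAP-MD5 response is
 * MD5(id || secret || challenge). The same computation with OpenSSL's MD5,
 * for a caller that has already hex-decoded the challenge out of a
 * $chap$id*challenge*response line: */
#include <string.h>
#include <openssl/md5.h>

static void chap_response(unsigned char id, const char *secret,
                          const unsigned char *challenge, size_t clen,
                          unsigned char out[MD5_DIGEST_LENGTH])
{
    MD5_CTX ctx;
    MD5_Init(&ctx);
    MD5_Update(&ctx, &id, 1);                    /* CHAP_I */
    MD5_Update(&ctx, secret, strlen(secret));    /* the shared secret */
    MD5_Update(&ctx, challenge, clen);           /* CHAP_C */
    MD5_Final(out, &ctx);                        /* CHAP_R */
}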
omp-parallel-nested-multi.c
#include <omp.h>
#include <unistd.h>
#include <stdio.h>

#define THREADS 2

int main(void)
{
    omp_set_max_active_levels(4);
    #pragma omp parallel num_threads(THREADS)
    {
        int j = omp_get_thread_num();
        #pragma omp parallel num_threads(3)
        {
            #pragma omp parallel num_threads(2)
            {
                printf("%d: %d/%d level=%d\n", j, omp_get_thread_num(),
                       omp_get_num_threads(), omp_get_level());
            }
            #pragma omp parallel num_threads(2)
            {
                printf("%d: %d/%d level=%d\n", j, omp_get_thread_num(),
                       omp_get_num_threads(), omp_get_level());
            }
        }
    }
}
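/* Supplementary sketch (hypothetical, standalone): instead of capturing the
 * outer thread number in a local such as `j` above, a nested region can
 * query its ancestors directly with omp_get_ancestor_thread_num(). */
#include <omp.h>
#include <stdio.h>

int main(void)
{
    omp_set_max_active_levels(2);
    #pragma omp parallel num_threads(2)
    {
        #pragma omp parallel num_threads(2)
        {
            printf("outer=%d inner=%d level=%d\n",
                   omp_get_ancestor_thread_num(1),  /* thread num at level 1 */
                   omp_get_thread_num(),
                   omp_get_level());                /* 2 in the inner region */
        }
    }
    return 0;
}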
GB_binop__max_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_int32) // A.*B function (eWiseMult): GB (_AemultB_08__max_int32) // A.*B function (eWiseMult): GB (_AemultB_02__max_int32) // A.*B function (eWiseMult): GB (_AemultB_04__max_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int32) // A*D function (colscale): GB (_AxD__max_int32) // D*A function (rowscale): GB (_DxB__max_int32) // C+=B function (dense accum): GB (_Cdense_accumB__max_int32) // C+=b function (dense accum): GB (_Cdense_accumb__max_int32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int32) // C=scalar+B GB (_bind1st__max_int32) // C=scalar+B' GB (_bind1st_tran__max_int32) // C=A+scalar GB (_bind2nd__max_int32) // C=A'+scalar GB (_bind2nd_tran__max_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 0 // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMAX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_INT32 || GxB_NO_MAX_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__max_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
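    // For this kernel the operator is MAX on int32, and GB_IMAX (x,y) is
    // symmetric in x and y, so GB_BINOP_FLIP is presumably not defined here
    // and this commutative branch is the one that compiles in.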
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
    nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IMAX (aij, y) ;                \
}

GrB_Info GB (_bind2nd_tran__max_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
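// An illustrative sketch, appended for exposition only (not part of the
// generated kernel). It assumes GB_IMAX is the usual integer-max macro and
// mirrors the bitmap-aware loop that GB (_bind1st__max_int32) runs via the
// GBB/GBX accessors; it is kept commented out so the file compiles unchanged.
/*
#include <stdint.h>

#define GB_IMAX(x,y) (((x) > (y)) ? (x) : (y))

// Cx [p] = max (x, Bx [p]) for every entry present in the bitmap Bb;
// a NULL Bb means B is full and every position holds an entry.
static void bind1st_max_int32_sketch (int32_t *Cx, int32_t x,
    const int32_t *Bx, const int8_t *Bb, int64_t bnz)
{
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   // entry not present: skip it
        Cx [p] = GB_IMAX (x, Bx [p]) ;
    }
}
*/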
km_er.h
/*
*
* Header file of the KM-config algorithm (C++ version)
*
*
* An algorithm for finding multiple core-periphery pairs in networks
*
*
* Core-periphery structure requires something else in the network
* Sadamori Kojaku and Naoki Masuda
* Preprint arXiv:1710.07076
*
*
* Please do not distribute without contacting the authors.
*
*
* AUTHOR - Sadamori Kojaku
*
*
* DATE - 11 Oct 2017
*/

#ifndef CP_ALGORITHM
#define CP_ALGORITHM
    #include "cpalgorithm.h"
#endif

class KM_ER: public CPAlgorithm{
public:
    // Constructors
    KM_ER();
    KM_ER(int num_runs);

    // functions that must be implemented
    void detect(const Graph& G);

    void calc_Q(
        const Graph& G,
        const vector<int>& c,
        const vector<double>& x,
        double& Q,
        vector<double>& q);

protected:

private:
    int _num_runs;
    int _round;
    uniform_real_distribution<double> _udist;

    void _km_ER_label_switching(
        const Graph& G,
        const int num_of_runs,
        vector<int>& c,
        vector<double>& x,
        double& Q,
        vector<double>& q,
        mt19937_64& mtrnd
    );

    double _calc_dQ_ER(double d_i_c, double d_i_p, double N_c, double N_p, double selfloop, double x, const double rho);

    void _propose_new_label(
        const Graph& G,
        const vector<int>& c,
        const vector<double>& x,
        const vector<double>& sz_core,
        const vector<double>& sz_peri,
        const double rho,
        const int node_id,
        int& cprime,
        double& xprime,
        double& dQ,
        mt19937_64& mtrnd
    );

    void _km_ER_label_switching_core(
        const Graph& G,
        vector<int>& c,
        vector<double>& x,
        mt19937_64& mtrnd
    );

    void _km_ER_louvain(
        const Graph& G,
        const int num_of_runs,
        vector<int>& c,
        vector<double>& x,
        double& Q,
        vector<double>& q,
        mt19937_64& mtrnd
    );

    /*
    void _km_ER_louvain_core(
        const Graph& G,
        vector<int>& c,
        vector<double>& x,
        mt19937_64& mtrnd
    );
    */

    void _coarsing(
        const Graph& G,
        const vector<int>& c,
        const vector<double>& x,
        Graph& newG,
        vector<int>& toLayerId
    );

    void _relabeling(vector<int>& c);
};

/*-----------------------------
Constructor
-----------------------------*/
// Delegate to the default constructor so that _udist, _mtrnd and the default
// settings are initialised before _num_runs is overridden; a plain KM_ER()
// statement in the body would only construct and discard a temporary.
KM_ER::KM_ER(int num_runs): KM_ER(){
    _num_runs = num_runs;
};

KM_ER::KM_ER():CPAlgorithm(){
    uniform_real_distribution<double> tmp(0.0,1.0);
    _udist = tmp;
    _num_runs = 10;
    _mtrnd = _init_random_number_generator();
};

/*-----------------------------
Functions inherited from the super class (CPAlgorithm)
-----------------------------*/
void KM_ER::detect(const Graph& G){
    //_km_ER_label_switching(G, _num_runs, _c, _x, _Q, _q, _mtrnd);
    _km_ER_louvain(G, _num_runs, _c, _x, _Q, _q, _mtrnd);
    //_km_ER_label_switching(G, _num_runs, _c, _x, _Q, _q, _mtrnd);
}

void KM_ER::calc_Q(
    const Graph& G,
    const vector<int>& c,
    const vector<double>& x,
    double& Q,
    vector<double>& q)
{
    int N = G.get_num_nodes();
    int K = *max_element(c.begin(), c.end()) + 1;
    q.assign(K, 0.0);
    vector<double> Nc(K, 0.0);
    vector<double> Np(K, 0.0);

    double double_M = 0.0;
    for (int i = 0; i < N; i++) {
        int sz = G.degree(i);
        for (int j = 0; j < sz; j++) {
            Neighbour nn = G.get_kth_neighbour(i, j);
            int nei = nn.get_node();
            double wj = nn.get_w();
            q[c[i]] += wj * !!(c[i] == c[nei]) * (x[i] + x[nei] - x[i] * x[nei]);
            double_M += wj;
        }
        Nc[c[i]] += x[i];
        Np[c[i]] += (1 - x[i]);
    }

    Q = 0;
    double rho = double_M / (double)(N * N);
    for (int k = 0; k < K; k++) {
        q[k] = (q[k] - rho * (Nc[k]*Nc[k] + 2 * Nc[k] * Np[k] )) / double_M;
        Q += q[k];
    }
}

/*-----------------------------
Private functions (internal use only)
-----------------------------*/
void KM_ER::_km_ER_label_switching(
    const Graph& G,
    const int num_of_runs,
    vector<int>& c,
    vector<double>& x,
    double& Q,
    vector<double>& q,
    mt19937_64& mtrnd
)
{
    int N = G.get_num_nodes();
    c.clear();
    x.clear();
    c.assign(N,
0); x.assign(N, 1); Q = -1; for (int i = 0; i < num_of_runs; i++) { vector<int> ci; vector<double> xi; vector<double> qi; double Qi = 0.0; _km_ER_label_switching_core(G, ci, xi, _mtrnd); calc_Q(G, ci, xi, Qi, qi); if (Qi > Q) { c = ci; x = xi; q.clear(); q = qi; Q = Qi; } } } /* void KM_ER::_km_ER_label_switching( const Graph& G, const int num_of_runs, vector<int>& c, vector<double>& x, double& Q, vector<double>& q, mt19937_64& mtrnd ) { int N = G.get_num_nodes(); c.clear(); x.clear(); c.assign(N, 0); x.assign(N, true); // Generate \hat q^{(s)} and \hat n^{(s)} (1 \leq s \leq S) // create random number generator per each thread int numthread = 1; #ifdef _OPENMP # pragma omp parallel { numthread = omp_get_num_threads(); } #endif cout<<numthread<<endl; vector<mt19937_64> mtrnd_list(numthread); for(int i = 0; i < numthread; i++){ mt19937_64 mtrnd = _init_random_number_generator(); mtrnd_list[i] = mtrnd; } Q = -1; #ifdef _OPENMP #pragma omp parallel for shared(c, x, Q, q, N, G, mtrnd_list) #endif for (int i = 0; i < num_of_runs; i++) { vector<int> ci; vector<double> xi; vector<double> qi; double Qi = 0.0; int tid = 0; #ifdef _OPENMP tid = omp_get_thread_num(); #endif mt19937_64 mtrnd = mtrnd_list[tid]; _km_ER_label_switching_core(G, ci, xi, mtrnd); calc_Q(G, ci, xi, Qi, qi); #pragma omp critical { if (Qi > Q) { c = ci; x = xi; q.clear(); q = qi; Q = Qi; } } } } */ double KM_ER::_calc_dQ_ER(double d_i_c, double d_i_p, double N_c, double N_p, double selfloop, double x, const double rho) { return 2 * (d_i_c + d_i_p * (x) - rho * (N_c + N_p * x) ) + x * (selfloop - 2 * rho); } void KM_ER::_propose_new_label( const Graph& G, const vector<int>& c, const vector<double>& x, const vector<double>& sz_core, const vector<double>& sz_peri, const double rho, const int node_id, int& cprime, double& xprime, double& dQ, mt19937_64& mtrnd) { int N = G.get_num_nodes(); int neighbourNum = G.degree(node_id); vector<double> edges_to_core(N, 0.0); vector<double> edges_to_peri(N, 0.0); double selfloop = 0; for (int j = 0; j < neighbourNum; j++) { Neighbour nn = G.get_kth_neighbour(node_id, j); int nei = nn.get_node(); double wj = nn.get_w(); if(node_id == nei){ selfloop+= wj; continue; } edges_to_core[c[nei]] += wj * x[nei]; edges_to_peri[c[nei]] += wj * (1-x[nei]); } double N_core = sz_core[c[node_id]] - x[node_id]; double N_peri = sz_peri[c[node_id]] - (1-x[node_id]); double dQold = _calc_dQ_ER(edges_to_core[c[node_id]], edges_to_peri[c[node_id]], N_core, N_peri, selfloop, x[node_id], rho); dQ = 0; for (int j = 0; j < neighbourNum; j++) { Neighbour nn = G.get_kth_neighbour(node_id, j); int nei = nn.get_node(); //double wj = nn.get_w(); int cid = c[nei]; N_core = sz_core[cid] - x[node_id] * (double)!!( c[node_id] == cid); N_peri = sz_peri[cid] - (1-x[node_id]) * (double)!!(c[node_id] == cid); double Q_i_core = _calc_dQ_ER(edges_to_core[cid], edges_to_peri[cid], N_core, N_peri, selfloop, 1, rho); double Q_i_peri = _calc_dQ_ER(edges_to_core[cid], edges_to_peri[cid], N_core, N_peri, selfloop, 0, rho); Q_i_core -= dQold; Q_i_peri -= dQold; if (MAX(Q_i_core, Q_i_peri) < dQ) continue; if (Q_i_peri < Q_i_core) { xprime = 1.0; cprime = cid; dQ = Q_i_core; } else if (Q_i_peri > Q_i_core) { xprime = 0.0; cprime = cid; dQ = Q_i_peri; } else { cprime = cid; if(_udist(mtrnd) < 0.5){ xprime = 1; }else{ xprime = 0; } dQ = Q_i_core; } } } void KM_ER::_km_ER_label_switching_core( const Graph& G, vector<int>& c, vector<double>& x, mt19937_64& mtrnd ) { /* Variable declarations */ int N = G.get_num_nodes(); vector<double> sz_core(N, 
0.0); vector<double> sz_peri(N, 0.0); vector<int> order(N); double M = 0; bool isupdated = false; c.clear(); x.clear(); c.assign(N, 0); x.assign(N, 1.0); _round = 0; for (int i = 0; i < N; i++) { order[i] = i; c[i] = i; sz_core[i] += x[i]; sz_peri[i] += 1-x[i]; M += G.wdegree(i); }; _round++; double rho = M / (double)( N * N ); M = M / 2; /* Label switching algorithm */ do { isupdated = false; shuffle(order.begin(), order.end(), mtrnd); for (int scan_count = 0; scan_count < N; scan_count++) { int i = order[scan_count]; int cprime = c[i]; // c' double xprime = x[i]; // x' double dQ = 0; _propose_new_label(G, c, x, sz_core, sz_peri, rho, i, cprime, xprime, dQ, mtrnd); if (dQ <= 0) continue; if ( (c[i] == cprime) & (x[i] == xprime) ) continue; sz_core[c[i]] -= x[i]; sz_peri[c[i]] -= 1-x[i]; sz_core[cprime] += xprime; sz_peri[cprime] += (1-xprime); c[i] = cprime; x[i] = xprime; isupdated = true; } _round++; } while (isupdated == true); /* Remove empty core-periphery pairs */ std::vector<int> labs; for (int i = 0; i < N; i++) { int cid = -1; int labsize = (int) labs.size(); for (int j = 0; j < labsize; j++) { if (labs[j] == c[i]) { cid = j; break; } } if (cid < 0) { labs.push_back(c[i]); cid = (int) labs.size() - 1; } c[i] = cid; } } /* Louvain algorithm */ void KM_ER::_km_ER_louvain( const Graph& G, const int num_of_runs, vector<int>& c, vector<double>& x, double& Q, vector<double>& q, mt19937_64& mtrnd ){ // Intiialise variables int N = G.get_num_nodes(); c.clear(); x.clear(); c.assign(N, 0); x.assign(N, 1); for (int i = 0; i < N; i++) c[i] = i; vector<int>ct = c; // label of each node at tth iteration vector<double>xt = x; // label of each node at tth iteration. Graph cnet_G; // coarse network vector<int> toLayerId; //toLayerId[i] maps 2*c[i] + x[i] to the id of node in the coarse network _coarsing(G, ct, xt, cnet_G, toLayerId); // Initialise toLayerId Q = 0; // quality of the current partition int cnet_N; do{ cnet_N = cnet_G.get_num_nodes(); // Core-periphery detection vector<int> cnet_c; // label of node in the coarse network, Mt vector<double> cnet_x; // label of node in the coarse network, Mt double Qt = 0; vector<double> qt; _km_ER_label_switching(cnet_G, num_of_runs, cnet_c, cnet_x, Qt, qt, mtrnd); //_km_ER_label_switching_core(cnet_G, cnet_c, cnet_x, mtrnd); // Update the label of node in the original network, ct and xt. 
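        // Each original node i carries the pair (ct[i], xt[i]); the key
        // 2*ct[i] + xt[i] names the coarse node it was merged into, toLayerId
        // maps that key to the coarse node's index, and the freshly detected
        // coarse labels are then copied back down to node i.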
for(int i = 0; i< N; i++){ int cnet_id = toLayerId[2 * ct[i] + (int)xt[i]]; ct[i] = cnet_c[ cnet_id ]; xt[i] = cnet_x[ cnet_id ]; } // Compute the quality //calc_Q(G, ct, xt, Qt, qt); calc_Q(cnet_G, cnet_c, cnet_x, Qt, qt); if(Qt>=Q){ // if the quality is the highest among those detected so far c = ct; x = xt; Q = Qt; q = qt; } // Coarsing Graph new_cnet_G; _coarsing(cnet_G, cnet_c, cnet_x, new_cnet_G, toLayerId); cnet_G = new_cnet_G; int sz = cnet_G.get_num_nodes(); if(sz == cnet_N) break; }while( true ); _relabeling(c); } void KM_ER::_coarsing( const Graph& G, const vector<int>& c, const vector<double>& x, Graph& newG, vector<int>& toLayerId ){ int N = (int) c.size(); vector<int> ids(N,0); int maxid = 0; for(int i = 0;i<N;i++){ ids[i] = 2 * c[i] + (int)x[i]; maxid = MAX(maxid, ids[i]); } _relabeling(ids); toLayerId.clear(); toLayerId.assign(maxid+1,0); for(int i = 0;i<N;i++){ toLayerId[2 * c[i] + (int)x[i]] = ids[i]; } int K = *max_element(ids.begin(), ids.end()) + 1; newG = Graph(K); for(int i = 0;i<N;i++){ int mi = 2 * c[i] + (int)x[i]; int sz = G.degree(i); for(int j = 0;j<sz;j++){ Neighbour nb = G.get_kth_neighbour(i, j); int nei = nb.get_node(); double w = nb.get_w(); int mj = 2 * c[nei] + (int)x[nei]; int sid = toLayerId[mi]; int did = toLayerId[mj]; newG.addEdge(sid, did, w); } } newG.compress(); } void KM_ER::_relabeling( vector<int>& c ){ int N = (int)c.size(); std::vector<int> labs; for (int i = 0; i < N; i++) { int cid = -1; int labsize = (int)labs.size(); for (int j = 0; j < labsize; j++) { if (labs[j] == c[i]) { cid = j; break; } } if (cid < 0) { labs.push_back(c[i]); cid = (int) labs.size() - 1; } c[i] = cid; } }
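/*-----------------------------
Usage sketch (illustrative only)
-----------------------------*/
// A minimal sketch of how this class is presumably driven, using only the
// Graph calls visible in this file (Graph(N), addEdge, compress) and calc_Q;
// the accessors for the detected labels live in CPAlgorithm and are not shown
// here, so this is an assumption-laden illustration, kept commented out.
/*
#include "km_er.h"

int main() {
    Graph G(4);                  // toy graph on 4 nodes
    G.addEdge(0, 1, 1.0);
    G.addEdge(1, 2, 1.0);
    G.addEdge(2, 3, 1.0);
    G.compress();

    KM_ER km(10);                // 10 restarts of the detection runs
    km.detect(G);                // Louvain-style core-periphery detection

    // score a hand-made assignment: one pair, nodes 0-2 core, node 3 periphery
    vector<int> c = {0, 0, 0, 0};
    vector<double> x = {1.0, 1.0, 1.0, 0.0};
    double Q;
    vector<double> q;
    km.calc_Q(G, c, x, Q, q);
    return 0;
}
*/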
interpolate_v2_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/hostdevice.h"

namespace paddle {
namespace operators {

template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;

inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  // read the target sizes out of the SizeTensor input list
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
                      platform::errors::InvalidArgument(
                          "The shape of dimension tensor should be [1],"
                          "but received %d.",
                          tensor->dims()));
    if (platform::is_gpu_place(tensor->place())) {
      framework::Tensor temp;
      TensorCopySync(*tensor, platform::CPUPlace(), &temp);
      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }

  return vec_new_shape;
}

template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
  std::vector<T> vec_new_data;
  auto* new_data = new_data_tensor->data<T>();
  framework::Tensor cpu_starts_tensor;
  if (platform::is_gpu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#ifdef PADDLE_WITH_ASCEND_CL
  if (platform::is_npu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#endif
  vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
  return vec_new_data;
}

inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  *N = dims[0];

  if (dims.size() == 3) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2];
    *D = 1;
    *H = 1;
    *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
  } else if (dims.size() == 4) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3];
    *D = 1;
    *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
  } else {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4];
    *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
    *W = data_layout == DataLayout::kNCHW ?
dims[4] : dims[3]; } } template <typename T> static void NearestNeighborInterpolate(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = input_t(i, j, in_k, in_l); } else { output_t(i, k, l, j) = input_t(i, in_k, in_l, j); } } } } } } template <typename T> static void LinearInterpolation(const Tensor& input, Tensor* output, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 3>::From(input); auto output_t = EigenTensor<T, 3>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(3) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int l = 0; l < out_w; l++) { // linear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vx_w[l]) * vd_e[l] + input_t(i, j, vx_e[l]) * vd_w[l]; output_t(i, j, l) = out_t; } else { out_t = input_t(i, vx_w[l], j) * vd_e[l] + input_t(i, vx_e[l], j) * vd_w[l]; output_t(i, l, j) = out_t; } } } } } template <typename T> static void LinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 3>::From(*input_grad); auto output_grad_t = EigenTensor<T, 3>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // linear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, l); input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e); input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w); } else { const T grad = output_grad_t(i, l, j); input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e); input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w); } } } } } template <typename T> static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int j = 0; j < out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(5) #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; } template <typename T> HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) { T A = -0.75; T x1 = t; coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A); coeffs[1] = cubic_convolution1<T>(x1, A); // opposite coefficients T x2 = 1.0 - t; coeffs[2] = cubic_convolution1<T>(x2, A); coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A); } template <typename T> static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) { T coeffs[4]; get_cubic_upsample_coefficients<T>(coeffs, t); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> static void BicubicInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); const T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); const T x_t = x_n - input_x; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels T coefficients[4]; // interp 4 times in x direction for (int ii = 0; ii < 4; ii++) { int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), static_cast<int>(0)); int access_x_0 = std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0)); int access_x_1 = std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0)); int access_x_2 = std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0)); int access_x_3 = std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { coefficients[ii] = cubic_interp<T>(input_t(i, j, access_y, access_x_0), input_t(i, j, access_y, access_x_1), input_t(i, j, access_y, access_x_2), input_t(i, j, access_y, access_x_3), x_t); } else { coefficients[ii] = cubic_interp<T>(input_t(i, access_y, access_x_0, j), input_t(i, access_y, access_x_1, j), input_t(i, access_y, access_x_2, j), input_t(i, access_y, access_x_3, j), x_t); } } // interp y direction if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { output_t(i, k, l, j) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } } } } } } template <typename T> static void NearestNeighborInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l); } else { input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j); } } } } } } template <typename T> static void BilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? 
(x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w); } else { const T grad = output_grad_t(i, k, l, j); input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w); } } } } } } template <typename T> static void TrilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int j = 0; j < out_d; j++) { // loop for D int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; for (int k = 0; k < out_h; k++) { // loop for H int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { // loop for W int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels // trilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(b, i, j, k, l); input_grad_t(b, i, t_f, y_n, x_w) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, i, t_f, y_n, x_e) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, i, t_f, y_s, x_w) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, i, t_f, y_s, x_e) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, i, t_b, y_n, x_w) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, i, t_b, y_n, x_e) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, i, t_b, y_s, x_w) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, i, t_b, y_s, x_e) += static_cast<T>(grad * d_f * d_n * d_w); } else { const T grad = output_grad_t(b, j, k, l, i); input_grad_t(b, t_f, y_n, x_w, i) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, t_f, y_n, x_e, i) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, t_f, y_s, x_w, i) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, t_f, y_s, x_e, i) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, t_b, y_n, x_w, i) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, t_b, y_n, x_e, i) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, t_b, y_s, x_w, i) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, t_b, y_s, x_e, i) += static_cast<T>(grad * d_f * d_n * d_w); } } } } } } } template <typename T> static void BicubicInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); T x_t = x_n - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients<T>(x_coeffs, x_t); get_cubic_upsample_coefficients<T>(y_coeffs, y_t); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bicubic interpolation grad for (int ii = 0; ii < 4; ii++) { for (int jj = 0; jj < 4; jj++) { int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1), static_cast<int>(0)); int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, access_y, access_x) += grad * y_coeffs[jj] * x_coeffs[ii]; } else { T grad = output_grad_t(i, k, l, j); input_grad_t(i, access_y, access_x, j) += grad * y_coeffs[jj] * x_coeffs[ii]; } } } } } } } } template <typename T> static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); float scale_w = -1.; if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { // float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? 
static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_w > 0. && scale_h > 0. && scale_d > 0.) 
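    // all three scale factors are known here, ordered (depth, height, width)
    // to match Attr("scale"), so the output extents follow from the input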
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale_w = -1.0; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) 
{ out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_w = scale_data[0]; scale_h = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) 
{ out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " 
"should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_d > 0. && scale_h > 0. && scale_w > 0.) { out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
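/* A minimal standalone sketch (not part of the Paddle source) of the ratio
   convention the kernels above repeat per axis: with align_corners the grid
   endpoints coincide, so the step is (in-1)/(out-1); otherwise the
   reciprocal of the user-supplied scale (or in/out) is used. Names here are
   illustrative only. */
#include <stdio.h>

static float interp_ratio(int in, int out, float scale, int align_corners)
{
    if (out <= 1) return 0.f;   /* degenerate axis: the ratio is never used */
    float new_scale = (scale > 0.f) ? 1.f / scale : (float) in / out;
    return align_corners ? (float) (in - 1) / (out - 1) : new_scale;
}

int main(void)
{
    /* upscaling width 4 -> 8, with and without aligned corners */
    printf("%f %f\n", interp_ratio(4, 8, -1.f, 1),    /* 3/7 ~ 0.428571 */
                      interp_ratio(4, 8, -1.f, 0));   /* 0.5 */
    return 0;
}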
GB_binop__bclr_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int64)
// C=scalar+B GB (_bind1st__bclr_int64)
// C=scalar+B' GB (_bind1st_tran__bclr_int64)
// C=A+scalar GB (_bind2nd__bclr_int64)
// C=A'+scalar GB (_bind2nd_tran__bclr_int64)

// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int64_t, 64)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITCLR (x, y, int64_t, 64) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_INT64 || GxB_NO_BCLR_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bclr_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bclr_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bclr_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bclr_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bclr_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bclr_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bclr_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = Bx [p] ; Cx [p] = GB_BITCLR (x, bij, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bclr_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = Ax [p] ; Cx [p] = GB_BITCLR (aij, y, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (x, aij, int64_t, 64) ; \ } GrB_Info GB (_bind1st_tran__bclr_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (aij, y, int64_t, 64) ; \ } GrB_Info GB (_bind2nd_tran__bclr_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
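/* The kernels above reach GB_BITCLR only through the GB_BINOP macro; the
   authoritative definition lives in the GraphBLAS headers. As a hedged,
   illustrative stand-in, a bit-clear operator with a 1-based bit index,
   where out-of-range indices leave x unchanged, could look like this: */
#include <stdint.h>
#include <stdio.h>

static int64_t bitclr_int64 (int64_t x, int64_t y)
{
    // clear bit y (1..64) of x; any other y is a no-op
    if (y < 1 || y > 64) return (x) ;
    return (x & ~(((int64_t) 1) << (y-1))) ;
}

int main (void)
{
    printf ("%lld\n", (long long) bitclr_int64 (0xFF, 3)) ;  /* 0xFB = 251 */
    return (0) ;
}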
cpl_image_iqe.c
/* * This file is part of the ESO Common Pipeline Library * Copyright (C) 2001-2017 European Southern Observatory * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include <config.h> #endif /*----------------------------------------------------------------------------- Includes -----------------------------------------------------------------------------*/ #include "cpl_image_iqe.h" #include "cpl_error_impl.h" #include "cpl_memory.h" #include "cpl_tools.h" #include "cpl_bivector.h" #include "cpl_image_basic.h" #include "cpl_image_defs.h" #include "cpl_math_const.h" #include "cpl_mpfit.h" #include <math.h> #include <string.h> #include <assert.h> /*----------------------------------------------------------------------------- Private Function prototypes - more are declared below -----------------------------------------------------------------------------*/ static int cpl_iqe(float *, int, int, float *, float *); /*----------------------------------------------------------------------------- Function codes -----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/ /** @ingroup cpl_image @brief Compute an image quality estimation for an object @param in the input image @param llx @param lly the zone to analyse ((1, 1) for the first pixel) @param urx The zone must be at least 4 by 4 pixels @param ury @return a newly allocated cpl_bivector containing the results or NULL in error case. This function makes internal use of the iqe() MIDAS function (called here cpl_iqe()) written by P. Grosbol. Refer to the MIDAS documentation for more details. This function has proven to give good results over the years when called from RTD. The goal is to provide the exact same functionality in CPL as the one provided in RTD. The code is simply copied from the MIDAS package, it is not maintained by the CPL team. The returned object must be deallocated with cpl_bivector_delete(). It contains in the first vector the computed values, and in the second one, the associated errors. The computed values are: - x position of the object - y position of the object - FWHM along the major axis - FWHM along the minor axis - the angle of the major axis with the horizontal in degrees - the peak value of the object - the background computed The bad pixels map of the image is not taken into account. The input image must be of type float. 
Possible #_cpl_error_code_ set in this function: - CPL_ERROR_NULL_INPUT if (one of) the input pointer(s) is NULL - CPL_ERROR_ILLEGAL_INPUT if the specified zone is not valid or if the computation fails on this zone - CPL_ERROR_INVALID_TYPE if the input image has the wrong type @note This function may lead to a strong underestimation of the sigmas and FWHMs (up to 25% underestimation in the case of noiseless data with a box 4 times the sigma, 1% underestimation in the case of noiseless data with a box 7 times the sigma). */ /*----------------------------------------------------------------------------*/ cpl_bivector * cpl_image_iqe(const cpl_image * in, cpl_size llx, cpl_size lly, cpl_size urx, cpl_size ury) { cpl_image * extracted; float parm[8], sdev[8]; float * pextracted; cpl_bivector * res; double * res_x; double * res_y; int iqe_error; cpl_size nx, ny; int inx, iny; /* Check entries */ cpl_ensure(in, CPL_ERROR_NULL_INPUT, NULL); cpl_ensure(in->type == CPL_TYPE_FLOAT, CPL_ERROR_INVALID_TYPE, NULL); cpl_ensure(urx >= llx + 3, CPL_ERROR_ILLEGAL_INPUT, NULL); cpl_ensure(ury >= lly + 3, CPL_ERROR_ILLEGAL_INPUT, NULL); /* Extract the image */ extracted = cpl_image_extract(in, llx, lly, urx, ury); cpl_ensure(extracted, CPL_ERROR_ILLEGAL_INPUT, NULL); nx = cpl_image_get_size_x(extracted); ny = cpl_image_get_size_y(extracted); inx = (int)nx; iny = (int)ny; if ((cpl_size)inx != nx || (cpl_size)iny != ny) { cpl_image_delete(extracted); (void)cpl_error_set_(CPL_ERROR_ILLEGAL_INPUT); return NULL; } /* Get the pointer to the data */ pextracted = cpl_image_get_data_float(extracted); iqe_error = cpl_iqe(pextracted, inx, iny, parm, sdev); cpl_image_delete(extracted); cpl_ensure(!iqe_error, CPL_ERROR_ILLEGAL_INPUT, NULL); /* Create the result cpl_bivector */ res = cpl_bivector_new(7); res_x = cpl_bivector_get_x_data(res); res_y = cpl_bivector_get_y_data(res); res_x[0] = (double)llx + parm[0]; res_y[0] = sdev[0]; res_x[1] = (double)lly + parm[2]; res_y[1] = sdev[2]; res_x[2] = parm[1]; res_y[2] = sdev[1]; res_x[3] = parm[3]; res_y[3] = sdev[3]; res_x[4] = parm[4]; res_y[4] = sdev[4]; res_x[5] = parm[5]; res_y[5] = sdev[5]; res_x[6] = parm[6]; res_y[6] = sdev[6]; return res; } /* Declare local functions private to avoid namespace conflicts */ /* Also add function declaration, to prevent problems in case the ieq() code is copied once again from RTD. */ static int estm9p(float *, int, int, int, int, float *, float *, float *); static int iqebgv(float *, int, int, float *, float *, int *); static int iqemnt(float *, int, int, float, float, float *, int); static int iqesec(float *, int, int, float, float *, float *, int); static int iqefit(float *, int, int, float, float *, float *, float *, int); static int g2efit(float *, float *, int, int, float [], float [], double *); static int g2einit(float *, float *, int, int); static int g2efunc(int, float *, float *, float *, float *, float *); /******************************************************************************/ /* iqe() COPIED FROM RTD 20-06-2003 */ /* - unused function definition indexd() removed */ /* - static modifier added to private functions */ /* - old-style function definitions replaced */ /******************************************************************************/ /* define constants */ #define hsq2 CPL_MATH_SQRT1_2 /* constant 0.5*sqrt(2) */ #define MA 6 /* No. of variables */ #define MITER 64 /* Max. no. of iterations */ #define MMA 16 /* Max. 
size of matrix */ #define SWAP(a,b) {double temp=(a);(a)=(b);(b)=temp;} /* end define */ static float *pval; static float *pwght; static int mxx, mp; static double w[9]; static double xi[9]; static double yi[9]; #ifdef _OPENMP #pragma omp threadprivate(pval,pwght,mxx,mp,w,xi,yi) #endif static int cpl_iqe( /*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .PURPOSE Estimate parameters for the Image Quality using a small frame around the object. The following parameters are estimated and given in the array 'parm': parm[0] = mean X position within array, first pixel = 0 parm[1] = FWHM in X parm[2] = mean Y position within array, first pixel = 0 parm[3] = FWHM in Y parm[4] = angle of major axis, degrees, along X = 0 parm[5] = peak value of object above background parm[6] = mean background level Further, estimates of the standard deviation of the parameters are given in 'sdev'. The routine is just a sequence of calls to 'iqebgv', 'iqemnt', 'iqesec' and 'iqefit'. .RETURN status, 0: OK, <0: estimate failed, ------------------------------------------------------------------------*/ float *pfm, int mx, int my, float *parm, float *sdev ) { int n, err, nbg; int winsize; float bgv, bgs; float ap[6], cv[6], est[6], sec[6]; for (n=0; n<7; n++) parm[n] = sdev[n] = 0.0; winsize = (mx * my) - 1; /* size of sub window */ if ((err=iqebgv(pfm, mx, my, &bgv, &bgs, &nbg))) return -1; parm[6] = bgv; sdev[6] = bgs; if ((err=iqemnt(pfm, mx, my, bgv, bgs, est, winsize))) return -2; parm[0] = est[1]; parm[1] = CPL_MATH_FWHM_SIG*est[2]; /* Sigma to FWHM */ parm[2] = est[3]; parm[3] = CPL_MATH_FWHM_SIG*est[4]; /* Sigma to FWHM */ parm[5] = est[0]; if ((err=iqesec(pfm, mx, my, bgv, est, sec, winsize))) return -3; parm[4] = CPL_MATH_DEG_RAD*sec[5]; /* Radian to Degrees */ if ((err=iqefit(pfm, mx, my, bgv, sec, ap, cv, winsize))<0) return -4; parm[0] = ap[1]; sdev[0] = cv[1]; parm[1] = CPL_MATH_FWHM_SIG*ap[2]; /* Sigma to FWHM */ sdev[1] = CPL_MATH_FWHM_SIG*cv[2]; /* Sigma to FWHM */ parm[2] = ap[3]; sdev[2] = cv[3]; parm[3] = CPL_MATH_FWHM_SIG*ap[4]; /* Sigma to FWHM */ sdev[3] = CPL_MATH_FWHM_SIG*cv[4]; /* Sigma to FWHM */ parm[4] = fmod(CPL_MATH_DEG_RAD*ap[5]+180.0, 180.0); sdev[4] = CPL_MATH_DEG_RAD*cv[5]; /* Radian to Degrees */ if (sdev[4] > 180.) sdev[4] = 180.0; /* max is: Pi */ parm[5] = ap[0]; sdev[5] = cv[0]; return 0; } static int iqebgv( /*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .PURPOSE Estimate background level for subimage .RETURN status, 0: OK, -1:no buffer space, -2: no points left ------------------------------------------------------------------------*/ float *pfm, int mx, int my, float *bgm, float *bgs, int *nbg ) { int n, m, ns, ms, nt, mt; float *pfb, *pwb, *pf, *pw; float *pf0, *pf1, *pf2, *pf3, *pfs0, *pfs1, *pfs2, *pfs3; double val, fks, ba, bm, bs; *bgm = 0.0; *bgs = 0.0; *nbg = 0; pfs0 = pfm; pfs1 = pfm + mx - 1; pfs2 = pfm + mx*(my-1); pfs3 = pfm + mx*my - 1; ns = (mx<my) ? mx - 1 : my - 1; ms = (mx<my) ? 
mx/4 : my/4;
pfb = (float *) cpl_calloc(8*ns*ms, sizeof(float));
if (!pfb) return -1;
pwb = pfb + 4*ns*ms;

/* extract edge of matrix from each corner */
nt = 0; pf = pfb;
for (m=0; m<ms; m++) {
    pf0 = pfs0; pf1 = pfs1; pf2 = pfs2; pf3 = pfs3;
    for (n=0; n<ns; n++) {
        *pf++ = *pf0++;
        *pf++ = *pf1; pf1 += mx;
        *pf++ = *pf2; pf2 -= mx;
        *pf++ = *pf3--;
    }
    nt += 4*ns; ns -= 2;
    pfs0 += mx + 1; pfs1 += mx - 1;
    pfs2 -= mx - 1; pfs3 -= mx + 1;
}

/* skip all elements with zero weight and sort clean array */
pf0 = pfb; pw = pwb; n = nt; mt = nt;
while (n--) *pw++ = 1.0;
cpl_tools_sort_ascn_float(pfb, mt);
nt = mt;

/* first estimate of mean and rms */
m = mt/2; n = mt/20;
ba = pfb[m];
bs = 0.606*(ba-pfb[n]);            /* 5% point at 1.650 sigma */
if (bs<=0.0) bs = sqrt(fabs(ba));  /* assume sigma of Poisson dist. */
*bgm = ba;

/* then do 5 loops kappa sigma clipping */
for (m=0; m<5; m++) {
    pf = pfb; pw = pwb;
    fks = 5.0 * bs; bm = bs = 0.0; mt = 0;
    for (n=0; n<nt; n++, pw++) {
        val = *pf++;
        if (0.0<*pw && fabs(val-ba)<fks) {
            bm += val; bs += val*val; mt++;
        }
        else *pw = 0.0;
    }
    if (mt<1) { cpl_free (pfb); return -2; }
    ba = bm/mt;
    bs = bs/mt - ba*ba;
    bs = (0.0<bs) ? sqrt(bs) : 0.0;
}

/* set return values and clean up */
*bgm = ba;
*bgs = bs;
*nbg = mt;
cpl_free(pfb);

return 0;
}

static int iqemnt(
/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.PURPOSE  Find center of object and do simple moment analysis
.COMMENTS The parameter array 'amm' is as follows:
          amm[0] = amplitude over background
          amm[1] = X center, amm[2] = X sigma;
          amm[3] = Y center, amm[4] = Y sigma;
          amm[5] = angle of major axis
.RETURN   status, 0: OK, -1: mean<0.0
------------------------------------------------------------------------*/
float *pfm,
int mx,
int my,
float bgv,
float bgs,
float *amm,
int winsize
)
{
int n, nx, ny, nt, nxc, nyc, ndx, ndy, ioff;
int k, ki, ks, kn, psize;
float av, dx, dy;
float *pf;
double val, x, y, dv, xm, ym;
double am, ax, ay, axx, ayy, axy;

/* Missing initialization fixed */
ndx = ndy = 0;

av = 0.0;
dv = 5.0*bgs;
xm = mx - 1.0;
ym = my - 1.0;
for (nx=0; nx<6; nx++) amm[nx] = 0.0;

/* get approx. center of object by going up along the gradient */
n = nx = ny = 1;
nxc = mx/2; nyc = my/2;
nt = (nxc<nyc) ? nxc : nyc;
while (nt--) {
    if (estm9p(pfm, mx, my, nxc, nyc, &av, &dx, &dy)) break;
    if (n) n = 0;
    else {
        if (dx*ndx<0.0) nx = 0;
        if (dy*ndy<0.0) ny = 0;
    }
    if (!nx && !ny) break;
    ndx = (0.0<dx) ? nx : -nx;
    ndy = (0.0<dy) ? ny : -ny;
    nxc += ndx; nyc += ndy;
}

/* then try a simple moment of pixels above 5 sigma */
y = 0.0; nt = 0;
ny = my; pf = pfm;
ax = ay = 0.0;
while (ny--) {
    x = 0.0; nx = mx; /* careful! */
    while (nx--) {
        val = *pf++ - bgv;
        if (dv<val) { ax += x; ay += y; nt++; }
        x += 1.0;
    }
    y += 1.0;
}
if (nt<1) return -1;
nx = floor(ax/nt); ny = floor(ay/nt);
val = pfm[nx+mx*ny];
if (av<val) { nxc = nx; nyc = ny; } /* the higher peak wins */

/* finally, compute moments just around this position */
nt = 0; nx = 1;
x = nxc; y = nyc;
ioff = nxc+mx*nyc;
n = (mx<my) ? mx-1 : my-1;
pf = pfm + ioff;
psize = pf - pfm;
if ((psize < 0) || (psize > winsize)) return -99;
val = *pf - bgv;
am = val;
ax = val*x; ay = val*y;
axx = val*x*x; ayy = val*y*y; axy = val*x*y;
nt++;
ki = ks = kn = 1;
while (n--) {
    k = kn;
    if (!ki && ks==-1) {
        if (nx) nx = 0; else break;
    }
    ioff = (ki) ?
ks : ks*mx;
    while (k--) {
        if (ki) x += ks; else y += ks;
        if (x<0.0 || y<0.0 || xm<x || ym<y) break;
        pf += ioff;
        psize = pf - pfm;
        if ((psize < 0) || (psize > winsize)) break;
        val = *pf - bgv;
        if (dv<val) {
            am += val;
            ax += val*x; ay += val*y;
            axx += val*x*x; ayy += val*y*y; axy += val*x*y;
            nt++; nx++;
        }
    }
    if ((ki=(!ki))) { ks = -ks; kn++; }
}
if (am<=0.0) return -1;

/* normalize the moments and put them into the output array */
amm[1] = ax/am;
amm[3] = ay/am;
axx = axx/am - amm[1]*amm[1];
amm[2] = (0.0<axx) ? sqrt(axx) : 0.0;
ayy = ayy/am - amm[3]*amm[3];
amm[4] = (0.0<ayy) ? sqrt(ayy) : 0.0;
axy = (axy/am-amm[1]*amm[3])/axx;
amm[5] = fmod(atan(axy)+CPL_MATH_PI, CPL_MATH_PI);
nx = amm[1]; ny = amm[3];
amm[0] = pfm[nx+ny*mx] - bgv;

return 0;
}

static int estm9p(
/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.PURPOSE  Estimate parameters for 3x3 pixel region
.RETURN   status, 0:OK, -1:out of range,
------------------------------------------------------------------------*/
float *pfm,
int mx,
int my,
int nx,
int ny,
float *rm,
float *dx,
float *dy
)
{
int n, nt, ix, iy;
float a, am;
float *pfb, *pwb, fb[9], _fb[9], wb[9];
cpl_size idx[9];

/* check if 3x3 region is fully within frame */
if (nx<1 || mx<nx-2 || ny<1 || my<ny-2) return -1;

/* extract region into local array and generate a rank index for it */
iy = 3;
pfb = fb; pwb = wb;
pfm += nx-1 + mx*(ny-1);
while(iy--) {
    ix = 3;
    while (ix--) { *pfb++ = *pfm++; *pwb++ = 1.0; }
    pfm += mx - 3;
}
memcpy(_fb, fb, 9 * sizeof(float));
cpl_tools_sort_stable_pattern_float(_fb, 9, 0, 0, idx);

/* omit largest value and estimate mean */
wb[idx[8]] = 0.0;
nt = 0; am = 0.0;
for (n=0; n<9; n++) {
    if (0.0<wb[n]) { am += fb[n]; nt++; }
}
*rm = am/nt;

/* calculate mean gradient in X and Y */
a = am = 0.0; ix = iy = 0;
for (n=0; n<9; n +=3) {
    if (0.0<wb[n]) a += fb[n], ix++;
    if (0.0<wb[n+2]) am +=fb[n+2], iy++;
}
*dx = 0.5*(am/iy - a/ix);
a = am = 0.0; ix = iy = 0;
for (n=0; n<3; n++) {
    if (0.0<wb[n]) a += fb[n], ix++;
    if (0.0<wb[n+6]) am +=fb[n+6], iy++;
}
*dy = 0.5*(am/iy - a/ix);

return 0;
}

static int iqesec(
/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.PURPOSE  Perform a sector analysis of object. Estimates for center
          and size are given in 'est' which is used for bootstrap.
.COMMENTS The parameter arrays 'est' and 'sec' are as follows
          sec[0] = amplitude over background
          sec[1] = X center, sec[2] = X sigma;
          sec[3] = Y center, sec[4] = Y sigma;
          sec[5] = angle of major axis
.RETURN   status, 0:OK, -1: no buffer,
------------------------------------------------------------------------*/
float *pfm,
int mx,
int my,
float bgv,
float *est,
float *sec,
int winsize
)
{
int n, k, ki, ks, kn, nxc, nyc, ioff, idx;
int ns[8], psize;
float *pf, f;
double x, y, xm, ym, xc, yc, dx, dy;
double r, rl, rh, sb[8], a2r, a2i;

/* initiate basic variables */
for (n=0; n<6; n++) sec[n] = 0.0;
for (n=0; n<8; n++) sb[n] = 0.0, ns[n] = 0;
xc = x = est[1]; xm = mx - 1.0;
yc = y = est[3]; ym = my - 1.0;
if (est[2]<est[4]) {
    rl = 2.0*est[2]; rh = 4.0*est[4];
    n = ceil(16.0*est[4]);
}
else {
    rl = 2.0*est[4]; rh = 4.0*est[2];
    n = ceil(16.0*est[2]);
}

/* extract the sectors around the center of the object */
nxc = floor(x+0.5); nyc = floor(y+0.5);
ioff = nxc+mx*nyc;
pf = pfm + ioff;
ki = ks = kn = 1;
while (n--) {
    k = kn;
    ioff = (ki) ?
ks : ks*mx;
    while (k--) {
        if (ki) x += ks; else y += ks;
        if (x<0.0 || y<0.0 || xm<x || ym<y) break;
        pf += ioff;
        psize = pf - pfm;
        if ((psize < 0) || (psize > winsize)) break;
        dx = x - xc; dy = y - yc;
        r = sqrt(dx*dx + dy*dy);
        if (rl<r && r<rh) {
            f = *pf - bgv;
            idx = ((int) (CPL_MATH_4_PI*atan2(y-yc,x-xc)+8.5))%8;
            sb[idx] += (0.0<f) ? f : 0.0;
            ns[idx]++;
        }
    }
    if ((ki=(!ki))) { ks = -ks; kn++; }
}

/* normalize the sector array and do explicit FFT for k=1,2 */
for (n=0; n<8; n++) {
    if (ns[n]<1) ns[n] = 1;
    sb[n] /= ns[n];
}
a2r = sb[0]-sb[2]+sb[4]-sb[6];
a2i = sb[1]-sb[3]+sb[5]-sb[7];
for (n=0; n<6; n++) sec[n] = est[n]; /* copy estimates over */
if (a2r==0.0 && a2i==0.0) return -2;
sec[5] = fmod(0.5*atan2(a2i,a2r), CPL_MATH_PI);

return 0;
}

static int iqefit(
/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.PURPOSE  Fit 2D Gaussian function to PSF
.COMMENTS The parameter arrays 'est' and 'ap' are as follows
          ap[0] = amplitude over background
          ap[1] = X center, ap[2] = X sigma;
          ap[3] = Y center, ap[4] = Y sigma;
          ap[5] = angle of major axis
.RETURN   no. of iterations, <0: error, -10: no buffer
------------------------------------------------------------------------*/
float *pfm,
int mx,
int my,
float bgv,
float *est,
float *ap,
float *cm,
int winsize
)
{
int n, ix, iy, nx, ny, nxs, nys, psize;
float *pfb, *pwb, *pf, *pw, *pfmo;
double chi;

/* initialize basic variables */
for (n=0; n<6; n++) ap[n] = cm[n] = 0.0;

/* allocate buffer for a 4 sigma region around the object */
pfmo = pfm; /* keep original start of array */
nxs = floor(est[1] - 4.0*est[2]);
if (nxs<0) nxs = 0;
nys = floor(est[3] - 4.0*est[4]);
if (nys<0) nys = 0;
nx = ceil(8.0*est[2]);
if (mx<nxs+nx) nx = mx - nxs;
ny = ceil(8.0*est[4]);
if (my<nys+ny) ny = my - nys;
pfb = (float *) cpl_calloc(2*nx*ny, sizeof(float));
if (!pfb) return -10;
pwb = pfb + nx*ny;

/* extract region from external buffer */
pfm += nxs + mx*nys;
psize = pfm - pfmo;
if ((psize < 0) || (psize > winsize)) {
    cpl_free (pfb);
    return -99;
}
pf = pfb; pw = pwb;
iy = ny;
while (iy--) {
    ix = nx;
    while (ix--) {
        *pf++ = *pfm++ - bgv;
        psize = pfm - pfmo;
        if (psize > winsize) {
            cpl_free (pfb);
            return -99;
        }
        *pw++ = 1.0;
    }
    pfm += mx - nx;
    psize = pfm - pfmo;
    if ((psize < 0) || (psize > winsize)) {
        cpl_free (pfb);
        return -99;
    }
}

/* initialize parameters for fitting */
ap[0] = est[0];
ap[1] = est[1] - nxs;
ap[2] = est[2];
ap[3] = est[3] - nys;
ap[4] = est[4];
ap[5] = est[5];

/* perform actual 2D Gauss fit on small subimage */
n = g2efit(pfb, pwb, nx, ny, ap, cm, &chi);

/* normalize parameters and uncertainties, and exit */
ap[1] += nxs;
ap[3] += nys;

cpl_free(pfb);
return n;
}

static int g2einit(
/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.PURPOSE  Initiate gauss fit function, set pointers to data and weights
.RETURN   status, 0: OK, -1: error - bad pixel no.
------------------------------------------------------------------------*/
float *val,
float *wght,
int nx,
int ny
)
{
double fh, w1, w2, w3;

if (nx<1) { /* if NO x-pixel set to NULL */
    pval = (float *) 0;
    pwght = (float *) 0;
    mxx = mp = 0;
    return -1;
}
pval = val; /* otherwise initiate static variables */
pwght = wght;
mxx = nx;
mp = (0<ny) ?
ny*nx : nx; fh = 0.5*sqrt(3.0/5.0); /* positions and weights for integration */ w1 = 16.0/81.0; w2 = 10.0/81.0; w3 = 25.0/324.0; xi[0] = 0.0; yi[0] = 0.0; w[0] = w1; xi[1] = 0.0; yi[1] = fh; w[1] = w2; xi[2] = 0.0; yi[2] = -fh; w[2] = w2; xi[3] = fh; yi[3] = 0.0; w[3] = w2; xi[4] = -fh; yi[4] = 0.0; w[4] = w2; xi[5] = fh; yi[5] = fh; w[5] = w3; xi[6] = -fh; yi[6] = fh; w[6] = w3; xi[7] = fh; yi[7] = -fh; w[7] = w3; xi[8] = -fh; yi[8] = -fh; w[8] = w3; return 0; } static int g2efunc( /*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .PURPOSE evaluate function value for given index .RETURN status, 0: OK, 1: error - bad pixel no. ------------------------------------------------------------------------*/ int idx, float *val, float *fval, float *psig, float *a, float *dyda ) { int n; double ff, sum, ci, si; double xc, yc, xx, yy, x, y; if (idx<0 || mp<=idx) return -1; /* check index */ if (pwght && pwght[idx]<0.0) return 1; /* check if valid pixel */ if (a[2]<=0.0 || a[4]<=0.0) return -2; /* negative sigmas */ xc = (double) (idx%mxx) - a[1]; yc = (double) (idx/mxx) - a[3]; *val = pval[idx]; *psig = (pwght) ? pwght[idx] : 1.0; si = sin(a[5]); ci = cos(a[5]); sum = 0.0; for (n=0; n<9; n++) { x = xc + xi[n]; y = yc + yi[n]; xx = (ci*x + si*y)/a[2]; yy = (-si*x + ci*y)/a[4]; sum += w[n]*exp(-0.5*(xx*xx+yy*yy)); } xx = (ci*xc + si*yc)/a[2]; yy = (-si*xc + ci*yc)/a[4]; ff = a[0]*sum; *fval = ff; dyda[0] = sum; dyda[1] = ff*(ci*xx/a[2] - si*yy/a[4]); dyda[2] = ff*xx*xx/a[2]; dyda[3] = ff*(si*xx/a[2] + ci*yy/a[4]); dyda[4] = ff*yy*yy/a[4]; dyda[5] = ff*((si*xc-ci*yc)*xx/a[2] + (ci*xc+si*yc)*yy/a[4]); return 0; } static int g2efunc2(int ndata, int npar, double *p, double *deviates, double **derivs, void *d) { int i, j; float * dyda = cpl_malloc(npar * sizeof(*dyda)); float fp[MA]; assert(d == NULL); for (j = 0; j < MA; j++) { fp[j] = p[j]; } /* Compute function deviates */ for (i=0; i<ndata; i++) { float z, err, val; int st = g2efunc(i, &val, &z, &err, fp, dyda); if (st < 0) { /* error */ cpl_free(dyda); return st; } if (st > 0 || err == 0.) { /* bad pixel */ deviates[i] = 0.; if (derivs) { int k; for (k=0; k<npar; k++) { if (derivs[k]) { derivs[k][i] = 0.; } } } } else { deviates[i] = (val - z) / err; if (derivs) { int k; for (k=0; k<npar; k++) { if (derivs[k]) { derivs[k][i] = -dyda[k] / err; } } } } } cpl_free(dyda); return 0; } static int g2efit( /*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .PURPOSE Perform 2D Gauss fit .RETURN status, no. of iterations, else -1: error - bad pixel no, -2: error - iteration. 
------------------------------------------------------------------------*/ float *val, float *wght, int nx, int ny, float ap[MA], float cv[MA], double *pchi ) { int i; int status; double * a = cpl_malloc((MA) * sizeof(*a)); mp_par * pars = cpl_calloc(MA, sizeof(*pars)); mp_result result; if (g2einit(val, wght, nx, ny)) return -1; memset(&result, 0, sizeof(result)); for (i = 0; i < MA; i++) { a[i] = ap[i]; pars[i].side = 3; } /* no negative sigma */ pars[2].limited[0] = 1; pars[2].limits[0] = 0.; pars[4].limited[0] = 1; pars[4].limits[0] = 0.; result.xerror = cpl_malloc(MA * sizeof(result.xerror[0])); status = cpl_mpfit((mp_func)&g2efunc2, nx * ny, MA, a, pars, NULL, NULL, &result); for (i = 0; i < MA; i++) { ap[i] = a[i]; cv[i] = result.xerror[i]; } ap[5] = fmod(ap[5], 4.0*atan(1.0)); *pchi = result.bestnorm; cpl_free(a); cpl_free(result.xerror); cpl_free(pars); if (status <= 0) return -2; if (ap[1]<0.0 || nx<ap[1] || ap[3]<0.0 || ny<ap[3]) return -3; if (result.niter > MITER) return -4; return result.niter; }
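/* A small standalone sketch (not part of CPL) of the sigma-to-FWHM
   conversion used throughout cpl_iqe(): for a Gaussian,
   FWHM = 2*sqrt(2*ln 2)*sigma, which is the constant a name like
   CPL_MATH_FWHM_SIG encodes (~2.35482). */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double fwhm_sig = 2.0 * sqrt(2.0 * log(2.0));   /* ~2.35482 */
    const double sigma = 1.7;
    printf("sigma = %g  ->  FWHM = %g\n", sigma, fwhm_sig * sigma);
    return 0;
}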
mmult_omp.c
#include "mat.h" /** * An parallelized algorithm for matrix multiplication using OMP. * * @param c : the matrix in which to place the result of the matrix multiplication. * @param a : the first matrix. * @param aRows : the number of rows in a. * @param aCols : the number of columns in a. * @param b : the second matrix. * @param bRows : the number of rows in b. * @param bCols : the number of columns in b. * @return 0 if the matrix multiplication is successful. */ int mmult_omp(double *c, double *a, int aRows, int aCols, double *b, int bRows, int bCols) { int i, j, k; #pragma omp parallel default(none) \ shared(a, b, c, aRows, aCols, bRows, bCols) private(i, k, j) #pragma omp for for (i = 0; i < aRows; i++) { for (j = 0; j < bCols; j++) { c[i*bCols + j] = 0; } for (k = 0; k < aCols; k++) { for (j = 0; j < bCols; j++) { c[i*bCols + j] += a[i*aCols + k] * b[k*bCols + j]; } } } return 0; }
pr27388-1.c
/* PR middle-end/27388 */ /* { dg-do compile } */ /* { dg-options "-fopenmp -fdump-tree-omplower" } */ int n, o; void foo (void) { #pragma omp parallel firstprivate (n) { int i; #pragma omp parallel for firstprivate (n) for (i = 0; i < 10; i++) ++n; #pragma omp atomic o += n; } } /* { dg-final { scan-tree-dump-times "shared\\\(i\\\)" 0 "omplower" } } */ /* { dg-final { scan-tree-dump-times "private\\\(i\\\)" 1 "omplower" } } */
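/* The testcase above only checks the omplower dump; as a hedged run-time
   sketch of the semantics under test: firstprivate gives each thread its
   own copy of n, initialized from the value on entry, so updates inside
   the region are not visible after it. */
#include <stdio.h>

int main (void)
{
  int n = 5;
  #pragma omp parallel firstprivate (n)
  {
    ++n;   /* increments the per-thread copy only */
  }
  printf ("%d\n", n);   /* still 5 */
  return 0;
}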
target-teams-1.c
/* { dg-do compile } */ /* { dg-additional-options "-fdump-tree-gimple" } */ int v = 6; void bar (int); void bar2 (int, long *, long *); int baz (void); #pragma omp declare target to (bar, baz, v) void foo (int a, int b, long c, long d) { /* The OpenMP 4.5 spec says that these expressions are evaluated before target region on combined target teams, so those cases are always fine. */ #pragma omp target bar (0); #pragma omp target #pragma omp teams bar (1); #pragma omp target teams bar (2); #pragma omp target #pragma omp teams num_teams (4) bar (3); #pragma omp target teams num_teams (4) bar (4); #pragma omp target #pragma omp teams thread_limit (7) bar (5); #pragma omp target teams thread_limit (7) bar (6); #pragma omp target #pragma omp teams num_teams (4) thread_limit (8) { { bar (7); } } #pragma omp target teams num_teams (4) thread_limit (8) bar (8); #pragma omp target #pragma omp teams num_teams (a) thread_limit (b) bar (9); #pragma omp target teams num_teams (a) thread_limit (b) bar (10); #pragma omp target #pragma omp teams num_teams (c + 1) thread_limit (d - 1) bar (11); #pragma omp target teams num_teams (c + 1) thread_limit (d - 1) bar (12); #pragma omp target map (always, to: c, d) #pragma omp teams num_teams (c + 1) thread_limit (d - 1) bar (13); #pragma omp target data map (to: c, d) { #pragma omp target defaultmap (tofrom: scalar) bar2 (14, &c, &d); /* This is one of the cases which can't be generally optimized, the c and d are (or could be) already mapped and whether their device and original values match is unclear. */ #pragma omp target map (to: c, d) #pragma omp teams num_teams (c + 1) thread_limit (d - 1) bar (15); } /* This can't be optimized, there are function calls inside of target involved. */ #pragma omp target #pragma omp teams num_teams (baz () + 1) thread_limit (baz () - 1) bar (16); #pragma omp target teams num_teams (baz () + 1) thread_limit (baz () - 1) bar (17); /* This one can't be optimized, as v might have different value between host and target. */ #pragma omp target #pragma omp teams num_teams (v + 1) thread_limit (v - 1) bar (18); } /* { dg-final { scan-tree-dump-times "num_teams\\(-1\\)" 3 "gimple" } } */ /* { dg-final { scan-tree-dump-times "thread_limit\\(-1\\)" 3 "gimple" } } */ /* { dg-final { scan-tree-dump-times "num_teams\\(0\\)" 4 "gimple" } } */ /* { dg-final { scan-tree-dump-times "thread_limit\\(0\\)" 6 "gimple" } } */ /* { dg-final { scan-tree-dump-times "num_teams\\(1\\)" 2 "gimple" } } */ /* { dg-final { scan-tree-dump-times "thread_limit\\(1\\)" 0 "gimple" } } */
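/* The testcase above only checks gimple dumps; as a hedged run-time sketch
   of the clauses it exercises: num_teams gives an upper bound on the league
   size and thread_limit bounds the threads per team. omp_get_num_teams()
   and omp_get_team_num() are standard OpenMP device routines. */
#include <stdio.h>
#include <omp.h>

int main (void)
{
  int teams = 0;
  #pragma omp target teams num_teams (4) thread_limit (8) map (tofrom: teams)
  {
    if (omp_get_team_num () == 0)
      teams = omp_get_num_teams ();   /* at most 4 by construction */
  }
  printf ("teams = %d\n", teams);
  return 0;
}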
mandelbrot.c
/*

To compile:

gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp

Or just type:

module load gcc
make

To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):

./mandelbrot 4096 4096 1

*/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"

// Q2a: add include for OpenMP header file here:
#include <omp.h>

#define MXITER 1000

typedef struct {
  double r;
  double i;
} complex_t;

// return iterations before z leaves mandelbrot set for given c
int testpoint(complex_t c){
  int iter;
  complex_t z;
  double temp;

  z = c;

  for(iter=0; iter<MXITER; iter++){
    temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;
    if((z.r*z.r+z.i*z.i)>4.0){
      return iter;
    }
  }
  return iter;
}

// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int n,m;
  complex_t c;

  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  // Q2c: add a compiler directive to split the outer for loop amongst threads here
  // (m and c must be private to each thread to avoid a data race)
  #pragma omp parallel for private(m, c)
  for(n=0;n<Nim;++n){
    for(m=0;m<Nre;++m){
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}

int main(int argc, char **argv){
  // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
  // usage: ./mandelbrot 4096 4096 1

  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[argc-1]);

  // Q2b: set the number of OpenMP threads to be Nthreads here:
  omp_set_num_threads(Nthreads);

  // storage for the iteration counts
  float *count = (float*) malloc(Nre*Nim*sizeof(float));

  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam  = 0.151579;

  complex_t cmin;
  complex_t cmax;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;

  // Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time
  double start;
  start = omp_get_wtime();

  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, cmax, count);

  // Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time
  double end;
  end = omp_get_wtime();

  // print elapsed time
  printf("elapsed = %g\n", end-start);

  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");
  write_hot_png(fp, Nre, Nim, count, 0, 80);

  exit(0);
  return 0;
}
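/* Rows of the Mandelbrot image have very uneven cost, so a dynamic schedule
   often balances load better than the default static split. A hedged
   variant of the hot loop (complex_t and testpoint() are assumed to come
   from the file above; the chunk size 8 is illustrative -- measure first). */
void mandelbrot_dynamic(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  #pragma omp parallel for schedule(dynamic, 8)
  for(int n=0;n<Nim;++n){
    for(int m=0;m<Nre;++m){
      complex_t c;   // declared per iteration, hence private to each thread
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}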
declare-target-2.c
/* { dg-do compile } */ /* { dg-options "-fopenmp" } */ extern int a; #pragma omp declare target #pragma omp declare target to (a) #pragma omp end declare target int b; #pragma omp declare target to (b) link (b) /* { dg-error "appears more than once on the same .declare target. directive" } */ int c; #pragma omp declare target (c) #pragma omp declare target link (c) /* { dg-error "specified both in declare target" } */ int foo (void); #pragma omp declare target link (foo) /* { dg-error "is not a variable in clause" } */ struct S; extern struct S d[]; /* { dg-error "array type has incomplete element type" "" { target c } } */ #pragma omp declare target to (d) /* { dg-error "does not have a mappable type in" } */ extern struct S e; #pragma omp declare target link (e) /* { dg-error "does not have a mappable type in" } */ extern int f[]; #pragma omp declare target to (f) /* { dg-error "does not have a mappable type in" } */ int g, h; #pragma omp threadprivate (g, h) #pragma omp declare target to (g) /* { dg-error "is threadprivate variable in" } */ #pragma omp declare target link (h) /* { dg-error "is threadprivate variable in" } */ int j[10]; #pragma omp declare target to (j[0:4]) /* { dg-error "expected" } */ int k, l; #pragma omp declare target int m; #pragma omp end declare target #pragma omp declare target to (k) #pragma omp declare target (k) #pragma omp declare target to (k, m) link (l) #pragma omp declare target link (l) int n, o, s, t; #pragma omp declare target to (n) to (n) /* { dg-error "appears more than once on the same .declare target. directive" } */ #pragma omp declare target link (o, o) /* { dg-error "appears more than once on the same .declare target. directive" } */ #pragma omp declare target (s, t, s) /* { dg-error "appears more than once on the same .declare target. directive" } */ int p, q, r; #pragma omp declare target (p) to (q) /* { dg-error "expected end of line before .to." } */ #pragma omp declare target to (p) (q) link (r) /* { dg-error "expected .#pragma omp. clause before" } */ #pragma omp declare target link (r) (p) /* { dg-error "expected .#pragma omp. clause before" } */ #pragma omp declare target #pragma omp end declare target to (p) /* { dg-error "expected end of line before .to." } */
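/* The file above is a negative test; for contrast, a hedged sketch of the
   accepted forms: "to" for a mappable variable made present on the device,
   and "link" for a variable mapped on demand (declared but unused here). */
#include <stdio.h>

int table[4] = { 1, 2, 3, 4 };
#pragma omp declare target to (table)
int big[1024];
#pragma omp declare target link (big)

int main (void)
{
  int s = 0;
  #pragma omp target map (tofrom: s)
  {
    for (int i = 0; i < 4; i++)
      s += table[i];
  }
  printf ("%d\n", s);   /* 10 */
  return 0;
}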
sw_pluto.h
void sw_pluto(){ printf("- pluto [16x16x16] - \n\n"); /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if (N >= 1) { lbp=0; ubp=floord(N,16); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7) for (t2=lbp;t2<=ubp;t2++) { for (t3=0;t3<=floord(N,16);t3++) { for (t4=max(1,16*t2);t4<=min(N,16*t2+15);t4++) { lbv=max(1,16*t3); ubv=min(N,16*t3+15); #pragma ivdep #pragma vector always for (t5=lbv;t5<=ubv;t5++) { m2[t4][t5] = INT_MIN;; m1[t4][t5] = INT_MIN;; } } } } for (t2=0;t2<=floord(N,8);t2++) { lbp=max(0,ceild(16*t2-N,16)); ubp=min(floord(N,16),t2); #pragma omp parallel for private(lbv,ubv,t4,t5,t6,t7) for (t3=lbp;t3<=ubp;t3++) { if (t2 >= 2*t3+1) { for (t4=16*t2-16*t3;t4<=min(N,16*t2-16*t3+15);t4++) { for (t5=max(1,16*t3);t5<=16*t3+15;t5++) { for (t6=t5+1;t6<=t4;t6++) { m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);; } for (t6=t4+1;t6<=t4+t5;t6++) { m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);; m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);; } H[t4][t5] = MAX(0, MAX( H[t4-1][t5-1] + s(a[t4], b[t4]), MAX(m1[t4][t5], m2[t4][t5])));; } } } if ((N >= 2) && (t2 == 0) && (t3 == 0)) { m2[1][1] = MAX(m2[1][1] ,H[1][1 -1] + W[1]);; m1[1][1] = MAX(m1[1][1] ,H[1 -1][1] + W[1]);; H[1][1] = MAX(0, MAX( H[1 -1][1 -1] + s(a[1], b[1]), MAX(m1[1][1], m2[1][1])));; for (t5=2;t5<=min(15,N);t5++) { for (t6=2;t6<=t5;t6++) { m2[1][t5] = MAX(m2[1][t5] ,H[1][t5-(t6-1)] + W[(t6-1)]);; } m2[1][t5] = MAX(m2[1][t5] ,H[1][t5-t5] + W[t5]);; m1[1][t5] = MAX(m1[1][t5] ,H[1 -1][t5] + W[1]);; H[1][t5] = MAX(0, MAX( H[1 -1][t5-1] + s(a[1], b[1]), MAX(m1[1][t5], m2[1][t5])));; } } if ((N == 1) && (t2 == 0) && (t3 == 0)) { m2[1][1] = MAX(m2[1][1] ,H[1][1 -1] + W[1]);; m1[1][1] = MAX(m1[1][1] ,H[1 -1][1] + W[1]);; H[1][1] = MAX(0, MAX( H[1 -1][1 -1] + s(a[1], b[1]), MAX(m1[1][1], m2[1][1])));; } if ((t2 >= 2) && (t2 == 2*t3) && (t2 <= floord(N-1,8))) { for (t6=8*t2+1;t6<=16*t2;t6++) { if (t2%2 == 0) { m2[8*t2][8*t2] = MAX(m2[8*t2][8*t2] ,H[8*t2][8*t2-(-8*t2+t6)] + W[(-8*t2+t6)]);; } if (t2%2 == 0) { m1[8*t2][8*t2] = MAX(m1[8*t2][8*t2] ,H[8*t2-(-8*t2+t6)][8*t2] + W[(-8*t2+t6)]);; } } if (t2%2 == 0) { H[8*t2][8*t2] = MAX(0, MAX( H[8*t2-1][8*t2-1] + s(a[8*t2], b[8*t2]), MAX(m1[8*t2][8*t2], m2[8*t2][8*t2])));; } for (t5=8*t2+1;t5<=min(N,8*t2+15);t5++) { for (t6=8*t2+1;t6<=t5;t6++) { if (t2%2 == 0) { m2[8*t2][t5] = MAX(m2[8*t2][t5] ,H[8*t2][t5-(-8*t2+t6)] + W[(-8*t2+t6)]);; } } for (t6=t5+1;t6<=8*t2+t5;t6++) { if (t2%2 == 0) { m2[8*t2][t5] = MAX(m2[8*t2][t5] ,H[8*t2][t5-(-8*t2+t6)] + W[(-8*t2+t6)]);; } if (t2%2 == 0) { m1[8*t2][t5] = MAX(m1[8*t2][t5] ,H[8*t2-(-t5+t6)][t5] + W[(-t5+t6)]);; } } if (t2%2 == 0) { H[8*t2][t5] = MAX(0, MAX( H[8*t2-1][t5-1] + s(a[8*t2], b[8*t2]), MAX(m1[8*t2][t5], m2[8*t2][t5])));; } } } if ((8*t2 == N) && (16*t3 == N)) { for (t6=N+1;t6<=2*N;t6++) { if (N%16 == 0) { m2[N][N] = MAX(m2[N][N] ,H[N][N-(t6-N)] + W[(t6-N)]);; } if (N%16 == 0) { m1[N][N] = MAX(m1[N][N] ,H[N-(t6-N)][N] + W[(t6-N)]);; } } if (N%16 == 0) { H[N][N] = MAX(0, MAX( H[N-1][N-1] + s(a[N], b[N]), MAX(m1[N][N], m2[N][N])));; } } if (t2 <= 2*t3-1) { for (t4=max(1,16*t2-16*t3);t4<=16*t2-16*t3+15;t4++) { for (t5=16*t3;t5<=min(N,16*t3+15);t5++) { for (t6=t4+1;t6<=t5;t6++) { m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);; } for (t6=t5+1;t6<=t4+t5;t6++) { m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);; m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + 
W[(-t5+t6)]);; } H[t4][t5] = MAX(0, MAX( H[t4-1][t5-1] + s(a[t4], b[t4]), MAX(m1[t4][t5], m2[t4][t5])));; } } } if (t2 == 2*t3) { for (t4=max(2,8*t2+1);t4<=min(N-1,8*t2+14);t4++) { for (t5=max(1,8*t2);t5<=t4-1;t5++) { for (t6=t5+1;t6<=t4;t6++) { if (t2%2 == 0) { m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);; } } for (t6=t4+1;t6<=t4+t5;t6++) { if (t2%2 == 0) { m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);; } if (t2%2 == 0) { m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);; } } if (t2%2 == 0) { H[t4][t5] = MAX(0, MAX( H[t4-1][t5-1] + s(a[t4], b[t4]), MAX(m1[t4][t5], m2[t4][t5])));; } } for (t6=t4+1;t6<=2*t4;t6++) { if (t2%2 == 0) { m2[t4][t4] = MAX(m2[t4][t4] ,H[t4][t4-(-t4+t6)] + W[(-t4+t6)]);; } if (t2%2 == 0) { m1[t4][t4] = MAX(m1[t4][t4] ,H[t4-(-t4+t6)][t4] + W[(-t4+t6)]);; } } if (t2%2 == 0) { H[t4][t4] = MAX(0, MAX( H[t4-1][t4-1] + s(a[t4], b[t4]), MAX(m1[t4][t4], m2[t4][t4])));; } for (t5=t4+1;t5<=min(N,8*t2+15);t5++) { for (t6=t4+1;t6<=t5;t6++) { if (t2%2 == 0) { m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);; } } for (t6=t5+1;t6<=t4+t5;t6++) { if (t2%2 == 0) { m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);; } if (t2%2 == 0) { m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);; } } if (t2%2 == 0) { H[t4][t5] = MAX(0, MAX( H[t4-1][t5-1] + s(a[t4], b[t4]), MAX(m1[t4][t5], m2[t4][t5])));; } } } } if ((N >= 2) && (t2 == 2*t3) && (t2 <= floord(N-1,8)) && (t2 >= ceild(N-14,8))) { for (t5=max(1,8*t2);t5<=N-1;t5++) { for (t6=t5+1;t6<=N;t6++) { if (t2%2 == 0) { m1[N][t5] = MAX(m1[N][t5] ,H[N-(-t5+t6)][t5] + W[(-t5+t6)]);; } } for (t6=N+1;t6<=t5+N;t6++) { if (t2%2 == 0) { m2[N][t5] = MAX(m2[N][t5] ,H[N][t5-(t6-N)] + W[(t6-N)]);; } if (t2%2 == 0) { m1[N][t5] = MAX(m1[N][t5] ,H[N-(-t5+t6)][t5] + W[(-t5+t6)]);; } } if (t2%2 == 0) { H[N][t5] = MAX(0, MAX( H[N-1][t5-1] + s(a[N], b[N]), MAX(m1[N][t5], m2[N][t5])));; } } for (t6=N+1;t6<=2*N;t6++) { if (t2%2 == 0) { m2[N][N] = MAX(m2[N][N] ,H[N][N-(t6-N)] + W[(t6-N)]);; } if (t2%2 == 0) { m1[N][N] = MAX(m1[N][N] ,H[N-(t6-N)][N] + W[(t6-N)]);; } } if (t2%2 == 0) { H[N][N] = MAX(0, MAX( H[N-1][N-1] + s(a[N], b[N]), MAX(m1[N][N], m2[N][N])));; } } if ((t2 == 2*t3) && (t2 <= floord(N-15,8))) { for (t5=max(1,8*t2);t5<=8*t2+14;t5++) { for (t6=t5+1;t6<=8*t2+15;t6++) { if (t2%2 == 0) { m1[(8*t2+15)][t5] = MAX(m1[(8*t2+15)][t5] ,H[(8*t2+15)-(-t5+t6)][t5] + W[(-t5+t6)]);; } } for (t6=8*t2+16;t6<=8*t2+t5+15;t6++) { if (t2%2 == 0) { m2[(8*t2+15)][t5] = MAX(m2[(8*t2+15)][t5] ,H[(8*t2+15)][t5-(-8*t2+t6-15)] + W[(-8*t2+t6-15)]);; } if (t2%2 == 0) { m1[(8*t2+15)][t5] = MAX(m1[(8*t2+15)][t5] ,H[(8*t2+15)-(-t5+t6)][t5] + W[(-t5+t6)]);; } } if (t2%2 == 0) { H[(8*t2+15)][t5] = MAX(0, MAX( H[(8*t2+15)-1][t5-1] + s(a[(8*t2+15)], b[(8*t2+15)]), MAX(m1[(8*t2+15)][t5], m2[(8*t2+15)][t5])));; } } for (t6=8*t2+16;t6<=16*t2+30;t6++) { if (t2%2 == 0) { m2[(8*t2+15)][(8*t2+15)] = MAX(m2[(8*t2+15)][(8*t2+15)] ,H[(8*t2+15)][(8*t2+15)-(-8*t2+t6-15)] + W[(-8*t2+t6-15)]);; } if (t2%2 == 0) { m1[(8*t2+15)][(8*t2+15)] = MAX(m1[(8*t2+15)][(8*t2+15)] ,H[(8*t2+15)-(-8*t2+t6-15)][(8*t2+15)] + W[(-8*t2+t6-15)]);; } } if (t2%2 == 0) { H[(8*t2+15)][(8*t2+15)] = MAX(0, MAX( H[(8*t2+15)-1][(8*t2+15)-1] + s(a[(8*t2+15)], b[(8*t2+15)]), MAX(m1[(8*t2+15)][(8*t2+15)], m2[(8*t2+15)][(8*t2+15)])));; } } } } } /* End of CLooG code */ }
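/* A hedged readability aid for the machine-generated (Pluto/CLooG) tiles
   above: the plain recurrence they appear to implement, Smith-Waterman with
   a general gap-weight array W. The arrays, MAXV and the toy score s() below
   are illustrative stand-ins for the globals/macros the header expects; the
   score term s(a[i], b[i]) mirrors the generated code (a textbook
   formulation would use s(a[i], b[j])). */
#include <limits.h>

#define NN 64
#define MAXV(x,y) ((x) > (y) ? (x) : (y))

static int H[NN+1][NN+1], m1[NN+1][NN+1], m2[NN+1][NN+1];
static int a[NN+1], b[NN+1], W[NN+1];

static int s(int x, int y){ return x == y ? 2 : -1; }  /* toy substitution score */

static void sw_reference(void){
  for (int i = 1; i <= NN; i++){
    for (int j = 1; j <= NN; j++){
      m1[i][j] = m2[i][j] = INT_MIN;
      for (int k = 1; k <= j; k++)   /* best gap ending horizontally */
        m2[i][j] = MAXV(m2[i][j], H[i][j-k] + W[k]);
      for (int k = 1; k <= i; k++)   /* best gap ending vertically */
        m1[i][j] = MAXV(m1[i][j], H[i-k][j] + W[k]);
      H[i][j] = MAXV(0, MAXV(H[i-1][j-1] + s(a[i], b[i]),
                             MAXV(m1[i][j], m2[i][j])));
    }
  }
}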
ParallelClusterCreator.h
#include <memory> //#ifdef PARALLEL_EXECUTION //#include <omp.h> //#endif // class for running clustering algorithm on Charts struct ParallelClusterCreator { static uint32_t create_charts(std::map<uint32_t, uint32_t>& chart_id_map, Polyhedron& P, const double cost_threshold, const uint32_t chart_threshold, CLUSTER_SETTINGS cluster_settings) { std::vector<Chart> charts; if(chart_id_map.size() == 0) { // std::cout << "Creating charts from individual faces\n"; // create charts create_initial_charts(charts, P); // populate lookup table to allow quicker determination of whether faces are neighbours when making joins populate_chart_LUT(charts, chart_id_map); } else { // std::cout << "Creating charts from grid based initial splitting\n"; uint32_t num_charts = ClusterCreator::initialise_charts_from_grid_clusters(P, chart_id_map, charts, cluster_settings, chart_threshold); // populate_chart_LUT(charts, chart_id_map); // check that existing chart number is not already lower than threshold /*if (num_charts <= chart_threshold) { std::cout << "Input to chart clusterer already had number of charts below chart threshold" << std::endl; return num_charts; }*/ // recalculate perimeters of charts to ensure they are correct for(auto& chart : charts) { chart.recalculate_perimeter_from_scratch(); chart.create_neighbour_set(chart_id_map); } // return chart_id_map.size(); } // create join bank vector and queue std::vector<std::shared_ptr<JoinOperation>> join_queue; create_joins_from_chart_vector(charts, join_queue, cluster_settings, chart_id_map); // do the clustering cluster_faces(charts, join_queue, cost_threshold, chart_threshold, cluster_settings, chart_id_map); return populate_chart_LUT(charts, chart_id_map); } // builds a chart list where each face of a polyhedron is 1 chart static void create_initial_charts(std::vector<Chart>& charts, Polyhedron& P) { // calculate areas of each face // std::cout << "Calculating face areas...\n"; std::map<face_descriptor, double> fareas; for(face_descriptor fd : faces(P)) { fareas[fd] = CGAL::Polygon_mesh_processing::face_area(fd, P); // std::cout << "Area " << fareas[fd] << std::endl; } // calculate normals of each faces // std::cout << "Calculating face normals...\n"; std::map<face_descriptor, Vector> fnormals; CGAL::Polygon_mesh_processing::compute_face_normals(P, boost::make_assoc_property_map(fnormals)); // get boost face iterator face_iterator fb_boost, fe_boost; boost::tie(fb_boost, fe_boost) = faces(P); // each face begins as its own chart // std::cout << "Creating initial charts..."; for(Facet_iterator fb = P.facets_begin(); fb != P.facets_end(); ++fb) { // init chart instance for face Chart c(charts.size(), std::make_shared<Facet>(*fb), fnormals[*fb_boost], fareas[*fb_boost]); charts.push_back(c); fb_boost++; } // std::cout << "..." 
<< charts.size() << " charts.\n"; } static void create_joins_from_chart_vector(std::vector<Chart>& charts, // std::vector<std::shared_ptr<JoinOperation> > &joins, std::vector<std::shared_ptr<JoinOperation>>& join_queue, CLUSTER_SETTINGS cluster_settings, std::map<uint32_t, uint32_t>& chart_id_map) { // std::cout << "Creating joins from chart list..."; std::set<uint32_t> processed_charts; std::set<uint32_t> chart_neighbours; // for each chart for(auto& chart : charts) { chart_neighbours.clear(); // for each face in chart, find neighbours, add to chart_neighbours set for(auto& face : chart.facets) { // for each edge Halfedge_facet_circulator fc = face->facet_begin(); do { if(!fc->is_border() && !(fc->opposite()->is_border())) // guard against no neighbour at this edge { // get chart id of neighbour, add to set if it is not this chart uint32_t nbr_face_id = fc->opposite()->facet()->id(); uint32_t nbr_chart_id = chart_id_map[nbr_face_id]; if(nbr_chart_id != chart.id) { chart_neighbours.insert(nbr_chart_id); } } } while(++fc != face->facet_begin()); } // create joins... // if neighbours have not already been processed, create join between this and neighbour for(auto& nbr_chart_id : chart_neighbours) { // make sure it hasnt been processed already if(processed_charts.find(nbr_chart_id) == processed_charts.end()) { // chart ids should be equal to their index in the vector at this point JoinOperation join(chart.id, nbr_chart_id, JoinOperation::cost_of_join(charts[chart.id], charts[nbr_chart_id], cluster_settings)); join_queue.push_back(std::make_shared<JoinOperation>(join)); } } // add this chart to set of processed charts, so that it is not considered for new joins processed_charts.insert(chart.id); } // end for each chart // std::cout << join_queue.size() << " joins\n"; } // takes a list of joins and charts, and executes joins until target number of charts/cost threshold is reached static void cluster_faces(std::vector<Chart>& charts, // std::vector<std::shared_ptr<JoinOperation> > &joins, std::vector<std::shared_ptr<JoinOperation>>& join_queue, const double cost_threshold, const uint32_t chart_threshold, CLUSTER_SETTINGS& cluster_settings, std::map<uint32_t, uint32_t>& chart_id_map) { if(join_queue.empty()) { std::cout << "ERROR: join_queue is empty - no joins possible" << std::endl; } // std::cout << "Clustering faces...." << std::endl; // std::stringstream report; // report << "--------------------\nReport:\n----------------------\n"; std::vector<std::shared_ptr<JoinOperation>>::iterator it; // for reporting and calculating when to stop merging const uint32_t initial_charts = charts.size(); const uint32_t desired_merges = initial_charts - chart_threshold; uint32_t chart_merges = 0; int prev_cost_percent = -1; int prev_charts_percent = -1; int overall_percent = -1; // key chart position (in chart vector) :: value - list of pointers to join operations that reference this chart // std::map<uint32_t, std::vector<std::shared_ptr<JoinOperation> > > chart_to_join_inverse_index; // populate_inverse_index(chart_to_join_inverse_index, charts, joins); // join_queue.sort(JoinOperation::sort_join_ptrs); std::sort(join_queue.begin(), join_queue.end(), JoinOperation::sort_join_ptrs); const double lowest_cost = join_queue.front()->cost; // execute lowest join cost and update affected joins. re-sort. 
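        // The loop below keeps join_queue sorted by ascending cost: pop the
        // cheapest join, skip it if an earlier merge marked it inactive, merge
        // its two charts, then redirect and re-cost every queued join that
        // referenced either chart and splice the updated joins back into their
        // sorted positions.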
// std::cout << "Processing join queue...\n"; while(join_queue.front()->cost < cost_threshold) { //&& !join_queue.empty()) { //&& (charts.size() - chart_merges) > chart_threshold){ if(join_queue.empty()) { break; } // if (join_queue.front()->cost > 1.f) { // std::cout << "joining cost: " << join_queue.front()->cost << " ... remaining joins: " << join_queue.size() << std::endl; //} // reporting------------- if(chart_merges % 1000 == 0) { // std::cout << "joining cost: " << join_queue.front()->cost << " ... remaining joins: " << join_queue.size() << std::endl; int percent = (int)(((join_queue.front()->cost - lowest_cost) / (cost_threshold - lowest_cost)) * 100); if(percent != prev_cost_percent && percent > overall_percent) { prev_cost_percent = percent; overall_percent = percent; std::cout << percent << " percent to cost threshold (" << chart_merges << " merges done)\n"; } } JoinOperation join_todo = *(join_queue.front()); join_queue.erase(join_queue.begin()); // guard against inactive joins if(!join_todo.active) { continue; } // check amount of neighbours resulting chart would have. if too few, skip to next one if(join_todo.results_in_chart_with_neighbours(charts, chart_id_map) < 3) { continue; } // merge faces from chart2 into chart 1 // std::cout << "merging charts " << join_todo.chart1_id << " and " << join_todo.chart2_id << std::endl; // charts[join_todo.get_chart1_id()].merge_with(charts[join_todo.get_chart2_id()], join_todo.cost); charts[join_todo.get_chart1_id()].merge_with(charts[join_todo.get_chart2_id()]); // DEactivate chart 2 if(charts[join_todo.get_chart2_id()].active == false) { // report << "chart " << join_todo.chart2_id << " was already inactive at merge " << chart_merges << std::endl; // should not happen continue; } // DEactivate chart 2 charts[join_todo.get_chart2_id()].active = false; //-------------------------------------------------------------- // update remaining joins that include either of the merged charts //-------------------------------------------------------------- #if 0 // use inverse index to retrieve the joins that need to be updated //merge affected join lists from 2 charts involved (from chart 2 to 1) std::vector<std::shared_ptr<JoinOperation>>& affected_joins = chart_to_join_inverse_index[join_todo.get_chart1_id()]; affected_joins.insert( affected_joins.end() , chart_to_join_inverse_index[join_todo.get_chart2_id()].begin(), chart_to_join_inverse_index[join_todo.get_chart2_id()].end()); // std::cout << "merged: " << affected_joins.size() << "\n"; std::list<uint32_t> indices_to_remove; //for each affected join, update or add to list for removal for (uint32_t i = 0; i < affected_joins.size(); i++){ // std::shared_ptr<JoinOperation> join_op = affected_joins[i]; std::shared_ptr<JoinOperation> join_op ( affected_joins[i] ); //replace expired chart and sorts chart ids join_op->replace_id_with(join_todo.get_chart2_id(), join_todo.get_chart1_id()); //check if this join is within a chart now - add to removal list if (join_op->get_chart1_id() == join_op-> get_chart2_id()) { indices_to_remove.push_back(i); join_op->active = false; } } // std::cout << "to remove: " << indices_to_remove.size() << "\n"; //remove those not needed any more indices_to_remove.sort(); int num_removed = 0; for (auto id : indices_to_remove) { std::vector< std::shared_ptr<JoinOperation> >::iterator it2 = affected_joins.begin(); // adjust ID to be deleted to account for previously deleted items std::advance(it2, id - num_removed); affected_joins.erase(it2); num_removed++; } // std::cout << 
"after removing: " << affected_joins.size() << "\n"; //remove duplicates in affected joins auto new_end_of_array = std::unique(affected_joins.begin(), affected_joins.end(), JoinOperation::compare); affected_joins.resize( std::distance(affected_joins.begin(),new_end_of_array) ); // std::cout << "after removing duplicates: " << affected_joins.size() << "\n"; //recalculate costs for what is left for (uint32_t i = 0; i < affected_joins.size(); i++){ // std::cout << "join " << i << std::endl; std::shared_ptr<JoinOperation> join_op ( affected_joins[i] ); // std::cout << "got join " << i << std::endl; join_op->cost = JoinOperation::cost_of_join(charts[join_op->get_chart1_id()], charts[join_op->get_chart2_id()], cluster_settings); // std::cout << "costed join " << i << std::endl; } // std::cout << "updated\n"; //resort join queue std::sort(join_queue.begin(),join_queue.end(), JoinOperation::sort_join_ptrs); // std::cout << "sorted\n"; #else // old method of updating join list std::vector<int> to_erase_merged; std::vector<std::shared_ptr<JoinOperation>> to_recalculate_error_merged; //#ifdef PARALLEL_EXECUTION //#pragma omp declare reduction(merge : std::vector <int> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) //#pragma omp declare reduction(merge : std::vector <std::shared_ptr <JoinOperation>> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) // //// find affected joins and add to list for erase/update //#pragma omp parallel for reduction(merge : to_erase_merged, to_recalculate_error_merged) //#endif for(uint32_t j = 0; j < join_queue.size(); j++) { // if join is affected, update references and cost if(join_queue[j]->get_chart1_id() == join_todo.get_chart1_id() || join_queue[j]->get_chart1_id() == join_todo.get_chart2_id() || join_queue[j]->get_chart2_id() == join_todo.get_chart1_id() || join_queue[j]->get_chart2_id() == join_todo.get_chart2_id()) { // eliminate references to joined chart 2 (it is no longer active) // by pointing them to chart 1 if(join_queue[j]->get_chart1_id() == join_todo.get_chart2_id()) { join_queue[j]->set_chart1_id(join_todo.get_chart1_id()); } if(join_queue[j]->get_chart2_id() == join_todo.get_chart2_id()) { join_queue[j]->set_chart2_id(join_todo.get_chart1_id()); } // save this join to be deleted (and replaced in queue if necessary) // to_erase[omp_get_thread_num()].push_back(j); to_erase_merged.push_back(j); // search for duplicates if(join_queue[j]->get_chart1_id() == join_todo.get_chart1_id() && join_queue[j]->get_chart2_id() == join_todo.get_chart2_id()) { // report << "duplicate found : c1 = " << it->chart1_id << ", c2 = " << it->chart2_id << std::endl; // set inactive join_queue[j]->active = false; } // check for joins within a chart else if(join_queue[j]->get_chart1_id() == join_queue[j]->get_chart2_id()) { // report << "Join found within a chart: " << join_queue[j]->chart1_id << std::endl; // set inactive join_queue[j]->active = false; } else { // add (pointer of JO) to vector to be updated // to_recalculate_error[omp_get_thread_num()].push_back(join_queue[j]); to_recalculate_error_merged.push_back(join_queue[j]); } } } // erase all elements that need to be erased (either no longer needed or will be recalculated) std::sort(to_erase_merged.begin(), to_erase_merged.end()); int num_erased = 0; for(auto id : to_erase_merged) { std::vector<std::shared_ptr<JoinOperation>>::iterator it2 = join_queue.begin(); // adjust ID to be deleted to account for previously deleted items std::advance(it2, id - num_erased); join_queue.erase(it2); 
num_erased++; } #ifdef PARALLEL_EXECUTION // recalculate error for joins that need to be updated #pragma omp parallel for #endif for(uint32_t j = 0; j < to_recalculate_error_merged.size(); j++) { std::shared_ptr<JoinOperation> join_ptr(to_recalculate_error_merged[j]); join_ptr->cost = JoinOperation::cost_of_join(charts[join_ptr->get_chart1_id()], charts[join_ptr->get_chart2_id()], cluster_settings); } // replace joins that were filtered out to be sorted if(to_recalculate_error_merged.size() > 0) { std::sort(to_recalculate_error_merged.begin(), to_recalculate_error_merged.end(), JoinOperation::sort_join_ptrs); std::vector<std::shared_ptr<JoinOperation>>::iterator it2; uint32_t insert_item = 0; for(it2 = join_queue.begin(); it2 != join_queue.end(); ++it2) { // insert items while join list item has bigger cost than element to be inserted while(insert_item < to_recalculate_error_merged.size() && (*it2)->cost > to_recalculate_error_merged[insert_item]->cost) { join_queue.insert(it2, to_recalculate_error_merged[insert_item]); insert_item++; } // if all items are in place, we are done if(insert_item >= to_recalculate_error_merged.size()) { break; } } // add any remaining items to end of queue for(uint32_t i = insert_item; i < to_recalculate_error_merged.size(); i++) { join_queue.push_back(to_recalculate_error_merged[i]); } } #endif chart_merges++; } // std::cout << "--------------------\nCharts:\n----------------------\n"; uint32_t total_faces = 0; uint32_t total_active_charts = 0; for(uint32_t i = 0; i < charts.size(); ++i) { if(charts[i].active) { uint32_t num_faces = charts[i].facets.size(); total_faces += num_faces; total_active_charts++; } } if(!join_queue.empty()) { // std::cout << "joins remaining: " << join_queue.size() << std::endl; std::cout << "Maximum error exceeded. Cost of cheapest join operation: " << join_queue.front()->cost << std::endl; } else { std::cout << "Join list empty" << std::endl; } // std::cout << "Total number of faces in charts = " << total_faces << std::endl; // std::cout << "Initial charts = " << charts.size() << std::endl; // std::cout << "Total number merges = " << chart_merges << std::endl; std::cout << "Num charts after chartification = " << total_active_charts << std::endl; // std::cout << report.str(); } // fill chart_id_map from chart vector static uint32_t populate_chart_LUT(std::vector<Chart>& charts, std::map<uint32_t, uint32_t>& chart_id_map) { chart_id_map.clear(); // populate LUT for face to chart mapping // count charts on the way to apply new chart ids uint32_t active_charts = 0; for(uint32_t id = 0; id < charts.size(); ++id) { auto& chart = charts[id]; if(chart.active) { for(auto& f : chart.facets) { chart_id_map[f->id()] = active_charts; } active_charts++; } } return active_charts; } // fills inverse index linking each chart with joins that reference it static void populate_inverse_index(std::map<uint32_t, std::vector<std::shared_ptr<JoinOperation>>>& chart_to_join_inverse_index, std::vector<Chart>& charts, std::vector<std::shared_ptr<JoinOperation>>& joins) { if(charts.size() == 0) { std::cout << "WARNING: no charts received in populate_inverse_index() \n"; return; } if(joins.size() == 0) { std::cout << "WARNING: no joins received in populate_inverse_index() \n"; return; } if(chart_to_join_inverse_index.size() == 0) { std::cout << "building inverse index from scratch..."; // initialise map? 
// for (int i = 0; i < charts.size()) } // for each join, add a pointer to the list for each relevant chart for(uint32_t i = 0; i < joins.size(); i++) { // chart_to_join_inverse_index[joins[i].get_chart1_id()].push_back( &(joins[i]) ); // chart_to_join_inverse_index[joins[i].get_chart2_id()].push_back( &(joins[i]) ); chart_to_join_inverse_index[joins[i]->get_chart1_id()].push_back(std::shared_ptr<JoinOperation>(joins[i])); chart_to_join_inverse_index[joins[i]->get_chart2_id()].push_back(std::shared_ptr<JoinOperation>(joins[i])); } std::cout << "Inverse index populated with " << chart_to_join_inverse_index.size() << " entries\n"; // debug only - checking inverse index was created correctly // for(auto& entry : chart_to_join_inverse_index){ // if(entry.second.size() == 0) // std::cout << "Chart with no joins: " << entry.first << std::endl; // } } };
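// A condensed, self-contained sketch of the control flow cluster_faces()
// implements: greedy best-first merging over a cost-sorted queue with lazy
// invalidation. The names below (PairJoin, greedy_merge) are illustrative
// placeholders only, not part of this header; the real code also re-costs
// affected joins and splices them back instead of re-sorting the whole queue.
#include <algorithm>
#include <cstdint>
#include <vector>

struct PairJoin
{
    uint32_t a, b;        // ids of the two clusters this join would merge
    double cost;          // precomputed cost of performing the merge
    bool active = true;   // cleared once a merge makes this join stale
};

inline void greedy_merge(std::vector<PairJoin>& queue, double cost_threshold)
{
    auto by_cost = [](const PairJoin& x, const PairJoin& y) { return x.cost < y.cost; };
    std::sort(queue.begin(), queue.end(), by_cost);
    while(!queue.empty() && queue.front().cost < cost_threshold)
    {
        PairJoin todo = queue.front();
        queue.erase(queue.begin());
        if(!todo.active) continue;            // lazy deletion of stale joins
        // ... merge cluster todo.b into todo.a (application-specific) ...
        for(PairJoin& j : queue)
        {
            if(j.a == todo.b) j.a = todo.a;   // redirect ids of the dead cluster
            if(j.b == todo.b) j.b = todo.a;
            if(j.a == j.b) j.active = false;  // join collapsed inside one cluster
            // ... re-cost joins that now touch todo.a ...
        }
        std::sort(queue.begin(), queue.end(), by_cost);
    }
}
// Design note: the sorted-vector queue above (and in cluster_faces()) pays
// O(n) per pop via erase(begin()); a binary heap with the same lazy-deletion
// flag is a common alternative when in-place cost updates are not needed.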
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *source_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *source_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o channel: the channel. % % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o source_image: the composite (source) image. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. 
% % Extra Controls from Image meta-data in 'source_image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o "compose:outside-overlay" % Modify how the composition is to effect areas not directly covered % by the 'source_image' at the offset given. Normally this is % dependant on the 'compose' method, especially Duff-Porter methods. % % If set to "false" then disable all normal handling of pixels not % covered by the source_image. Typically used for repeated tiling % of the source_image by the calling API. % % Previous to IM v6.5.3-3 this was called "modify-outside-overlay" % */ /* ** Programmers notes on SVG specification. ** ** A Composition is defined by... ** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors ** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc) ** Y = 1 for source preserved ** Z = 1 for canvas preserved ** ** Conversion to transparency (then optimized) ** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) ** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) ** ** Where... ** Sca = Sc*Sa normalized Source color divided by Source alpha ** Dca = Dc*Da normalized Dest color divided by Dest alpha ** Dc' = Dca'/Da' the desired color value for this channel. ** ** Da' in in the follow formula as 'gamma' The resulting alpla value. ** ** ** Most functions use a blending mode of over (X=1,Y=1,Z=1) ** this results in the following optimizations... ** gamma = Sa+Da-Sa*Da; ** gamma = 1 - QuantumScale*alpha * QuantumScale*beta; ** opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma ** ** The above SVG definitions also define that Mathematical Composition ** methods should use a 'Over' blending mode for Alpha Channel. ** It however was not applied for composition modes of 'Plus', 'Minus', ** the modulus versions of 'Add' and 'Subtract'. ** ** ** Mathematical operator changes to be applied from IM v6.7... ** ** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed ** 'ModulusAdd' and 'ModulusSubtract' for clarity. ** ** 2/ All mathematical compositions work as per the SVG specification ** with regard to blending. This now includes 'ModulusAdd' and ** 'ModulusSubtract'. ** ** 3/ When the special channel flag 'sync' (syncronize channel updates) ** is turned off (enabled by default) then mathematical compositions are ** only performed on the channels specified, and are applied ** independantally of each other. In other words the mathematics is ** performed as 'pure' mathematical operations, rather than as image ** operations. */ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa,const MagickRealType q, const MagickRealType magick_unused(Da)) { magick_unreferenced(Da); return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */ composite->red=Atop(p->red,Sa,q->red,1.0); composite->green=Atop(p->green,Sa,q->green,1.0); composite->blue=Atop(p->blue,Sa,q->blue,1.0); if (q->colorspace == CMYKColorspace) composite->index=Atop(p->index,Sa,q->index,1.0); } /* What is this Composition method for? 
Can't find any specification! WARNING this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType intensity; intensity=MagickPixelIntensity(p); composite->red=QuantumScale*intensity*q->red; composite->green=QuantumScale*intensity*q->green; composite->blue=QuantumScale*intensity*q->blue; composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity; if (q->colorspace == CMYKColorspace) composite->index=QuantumScale*intensity*q->index; } static inline void CompositeClear(const MagickPixelPacket *q, MagickPixelPacket *composite) { composite->opacity=(MagickRealType) TransparentOpacity; composite->red=0.0; composite->green=0.0; composite->blue=0.0; if (q->colorspace == CMYKColorspace) composite->index=0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { double SaSca; if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) return(Sa*Da+Dca*(1.0-Sa)); if (Sca < MagickEpsilon) return(Dca*(1.0-Sa)); SaSca=Sa*PerceptibleReciprocal(Sca); return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeColorBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* Oct 2004 SVG specification. */ if ((Sca*Da+Dca*Sa) >= Sa*Da) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #if 0 /* New specification, March 2009 SVG specification. This specification was also wrong of non-overlap cases. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca))); #endif #if 0 /* Working from first principles using the original formula: f(Sc,Dc) = Dc/(1-Sc) This works correctly! Looks like the 2004 model was right but just required a extra condition for correct handling. 
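    Derivation sketch (using the SVG transparency expansion from the
    programmer notes above, with X=Y=Z=1): substituting f(Sc,Dc) = Dc/(1-Sc),
    Sc = Sca/Sa and Dc = Dca/Da into
        Dca' = f(Sc,Dc)*Sa*Da + Sca*(1-Da) + Dca*(1-Sa)
    gives
        Dca' = Dca*Sa*Sa/(Sa-Sca) + Sca*(1-Da) + Dca*(1-Sa)
    which is exactly the general-case return value below (and, with
    PerceptibleReciprocal guarding the division, the live 2004-spec branch
    above).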
*/ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p < q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMax(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMin(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMin(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMin(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMin(p->index,q->index); } } static inline void CompositeDarkenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use intensity only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? 
p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q)); } static inline void CompositeDifference(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); /* Values are not normalized as an optimization. */ composite->red=gamma*Difference(p->red,Sa,q->red,Da); composite->green=gamma*Difference(p->green,Sa,q->green,Da); composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Difference(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity)); if ( (channel & RedChannel) != 0 ) composite->red=fabs((double) (p->red-q->red)); if ( (channel & GreenChannel) != 0 ) composite->green=fabs((double) (p->green-q->green)); if ( (channel & BlueChannel) != 0 ) composite->blue=fabs((double) (p->blue-q->blue)); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=fabs((double) (p->index-q->index)); } } static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { /* Divide Source by Destination f(Sc,Dc) = Sc / Dc But with appropriate handling for special case of Dc == 0 specifically so that f(Black,Black)=Black and f(non-Black,Black)=White. It is however also important to correctly do 'over' alpha blending which is why the formula becomes so complex. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Dca) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sca*Da*Da*PerceptibleReciprocal(Dca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeDivide(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ((channel & AlphaChannel) != 0) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ((channel & RedChannel) != 0) composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0, QuantumScale*q->red,1.0); if ((channel & GreenChannel) != 0) composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0, QuantumScale*q->green,1.0); if ((channel & BlueChannel) != 0) composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0, QuantumScale*q->blue,1.0); if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace)) composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0, QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeHardLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType HardMix(const MagickRealType Sca, const MagickRealType Dca) { if ((Sca+Dca) < QuantumRange) return(0.0); else return(1.0); } static inline void CompositeHardMix(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardMix(p->red*Sa,q->red*Da); composite->green=gamma*HardMix(p->green*Sa,q->green*Da); composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardMix(p->index*Sa,q->index*Da); } static void HCLComposite(const double hue,const double chroma,const double luma, MagickRealType *red,MagickRealType *green,MagickRealType *blue) { double b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
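    h = 6*hue picks one of six hue sectors, x = c*(1-|fmod(h,2)-1|) is the
    intermediate chroma component within the sector, and m adds back
    luma minus the Rec.601-style luma (0.298839 R + 0.586811 G + 0.114350 B)
    of the sector color so the result keeps the requested luma.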
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,double *hue,double *chroma,double *luma) { double b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (double *) NULL); assert(chroma != (double *) NULL); assert(luma != (double *) NULL); r=(double) red; g=(double) green; b=(double) blue; max=MagickMax(r,MagickMax(g,b)); c=max-(double) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == (MagickRealType) max) h=fmod((g-b)/c+6.0,6.0); else if (green == (MagickRealType) max) h=((b-r)/c)+2.0; else if (blue == (MagickRealType) max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa, const MagickRealType magick_unused(q),const MagickRealType Da) { magick_unreferenced(q); return(Sa*p*Da); } static inline void CompositeIn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*Da; composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*In(p->red,Sa,q->red,Da); composite->green=gamma*In(p->green,Sa,q->green,Da); composite->blue=gamma*In(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*In(p->index,Sa,q->index,Da); } static inline MagickRealType Lighten(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p > q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeLighten(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Lighten is also equvalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. 
*/ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMin(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMax(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMax(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMax(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMax(p->index,q->index); } } static inline void CompositeLightenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use Intenisty only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? 
p->index : q->index; } } #if 0 static inline MagickRealType LinearDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearDodge: simplifies to a trivial formula f(Sc,Dc) = Sc + Dc Dca' = Sca + Dca */ return(Sca+Dca); } #endif static inline void CompositeLinearDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*(p->red*Sa+q->red*Da); composite->green=gamma*(p->green*Sa+q->green*Da); composite->blue=gamma*(p->blue*Sa+q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*(p->index*Sa+q->index*Da); } static inline MagickRealType LinearBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ return(Sca+Dca-Sa*Da); } static inline void CompositeLinearBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType LinearLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { #if 0 /* Previous formula, was only valid for fully-opaque images. */ return(Dca+2*Sca-1.0); #else /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ return((Sca-Sa)*Da+Sca+Dca); #endif } static inline void CompositeLinearLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Mathematics(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da, const GeometryInfo *geometry_info) { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+ geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+ Dca*(1.0-Sa)); } static inline void CompositeMathematics(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, const GeometryInfo *args, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da,args); composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da,args); composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da,args); if (q->colorspace == CMYKColorspace) composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da,args); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args); } } static inline void CompositePlus(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { /* NOTE: "Plus" does not use 'over' alpha-blending but uses a special 'plus' form of alph-blending. It is the ONLY mathematical operator to do this. this is what makes it different to the otherwise equivalent "LinearDodge" composition method. 
Note however that color channels are still effected by the alpha channel as a result of the blending, making it just as useless for independant channel maths, just like all other mathematical composition methods. As such the removal of the 'sync' flag, is still a usful convention. The MagickPixelCompositePlus() function is defined in "composite-private.h" so it can also be used for Image Blending. */ MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=p->opacity+q->opacity-QuantumRange; if ( (channel & RedChannel) != 0 ) composite->red=p->red+q->red; if ( (channel & GreenChannel) != 0 ) composite->green=p->green+q->green; if ( (channel & BlueChannel) != 0 ) composite->blue=p->blue+q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=p->index+q->index; } } static inline MagickRealType Minus(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca, const MagickRealType magick_unused(Da)) { /* Minus Source from Destination f(Sc,Dc) = Sc - Dc */ magick_unreferenced(Da); return(Sca+Dca-2*Dca*Sa); } static inline void CompositeMinus(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da); composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da); composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-(Sa-Da)); if ( (channel & RedChannel) != 0 ) composite->red=p->red-q->red; if ( (channel & GreenChannel) != 0 ) composite->green=p->green-q->green; if ( (channel & BlueChannel) != 0 ) composite->blue=p->blue-q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=p->index-q->index; } } static inline MagickRealType ModulusAdd(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { MagickRealType pixel; pixel=p+q; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa)); } static inline void CompositeModulusAdd(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=ModulusAdd(p->red,Sa,q->red,Da); composite->green=ModulusAdd(p->green,Sa,q->green,Da); composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=ModulusAdd(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel 
& AlphaChannel) != 0 ) composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity, 1.0,QuantumRange-q->opacity,1.0); if ( (channel & RedChannel) != 0 ) composite->red=ModulusAdd(p->red,1.0,q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=ModulusAdd(p->green,1.0,q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=ModulusAdd(p->index,1.0,q->index,1.0); } } static inline MagickRealType ModulusSubtract(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { MagickRealType pixel; pixel=p-q; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa)); } static inline void CompositeModulusSubtract(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma = RoundToUnity(Sa+Da-Sa*Da); composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=ModulusSubtract(p->red,Sa,q->red,Da); composite->green=ModulusSubtract(p->green,Sa,q->green,Da); composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=ModulusSubtract(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity, 1.0,QuantumRange-q->opacity,1.0); if ( (channel & RedChannel) != 0 ) composite->red=ModulusSubtract(p->red,1.0,q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=ModulusSubtract(p->green,1.0,q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=ModulusSubtract(p->index,1.0,q->index,1.0); } } static inline MagickRealType Multiply(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeMultiply(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Sa*Da); if ( (channel & RedChannel) != 0 ) composite->red=QuantumScale*p->red*q->red; if ( (channel & GreenChannel) != 0 ) composite->green=QuantumScale*p->green*q->green; if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumScale*p->blue*q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumScale*p->index*q->index; } } static inline MagickRealType Out(const MagickRealType p, const MagickRealType Sa,const MagickRealType magick_unused(q), const MagickRealType Da) { magick_unreferenced(q); return(Sa*p*(1.0-Da)); } static inline void CompositeOut(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*(1.0-Da); composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Out(p->red,Sa,q->red,Da); composite->green=gamma*Out(p->green,Sa,q->green,Da); composite->blue=gamma*Out(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Out(p->index,Sa,q->index,Da); } static MagickRealType PegtopLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. */ if (fabs(Da) < MagickEpsilon) return(Sca); return(Dca*Dca*(Sa-2.0*Sca)*PerceptibleReciprocal(Da)+Sca*(2.0*Dca+1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositePegtopLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType PinLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 
2*Sc : Dc */ if (Dca*Sa < Da*(2*Sca-Sa)) return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); if ((Dca*Sa) > (2*Sca*Da)) return(Sca*Da+Sca+Dca*(1.0-Sa)); return(Sca*(1.0-Da)+Dca); } static inline void CompositePinLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Screen(const MagickRealType Sca, const MagickRealType Dca) { /* Screen: A negated multiply f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ return(Sca+Dca-Sca*Dca); } static inline void CompositeScreen(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); Sa*=(MagickRealType) QuantumScale; Da*=(MagickRealType) QuantumScale; /* optimization */ gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
      MagickEpsilon : gamma);
    composite->red=gamma*Screen(p->red*Sa,q->red*Da);
    composite->green=gamma*Screen(p->green*Sa,q->green*Da);
    composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Screen(p->index*Sa,q->index*Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumRange*Screen(QuantumScale*p->red,
          QuantumScale*q->red);
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumRange*Screen(QuantumScale*p->green,
          QuantumScale*q->green);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
          QuantumScale*q->blue);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumRange*Screen(QuantumScale*p->index,
          QuantumScale*q->index);
    }
}

static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  MagickRealType
    alpha,
    beta;

  alpha=Dca*PerceptibleReciprocal(Da);
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
  return(beta);
}

static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated: multiply the difference by 'amount' if the difference is
  larger than 'threshold'???  What use this has is completely unknown.
  The opacity calculation appears to be inverted.  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

    f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)*PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+
      Dca*(1.0-Sa));
  return(Dca*Sa*Sa*PerceptibleReciprocal(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*
    (1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma); composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { return(Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeXor(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa+Da-2*Sa*Da; /* Xor blend mode X=0,Y=1,Z=1 */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da); composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da); composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da); } MagickExport MagickBooleanType CompositeImage(Image *image, const CompositeOperator compose,const Image *source_image, const ssize_t x_offset,const ssize_t y_offset) { MagickBooleanType status; status=CompositeImageChannel(image,DefaultChannels,compose,source_image, x_offset,y_offset); return(status); } MagickExport MagickBooleanType CompositeImageChannel(Image *image, const ChannelType channel,const CompositeOperator compose, const Image *composite,const ssize_t x_offset,const ssize_t y_offset) { #define CompositeImageTag "Composite/Image" CacheView *source_view, *image_view; const char *value; ExceptionInfo *exception; GeometryInfo geometry_info; Image *canvas_image, *source_image; MagickBooleanType clamp, clip_to_self, status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; /* Prepare composite image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); (void) SetImageColorspace(source_image,image->colorspace); GetMagickPixelPacket(image,&zero); canvas_image=(Image *) NULL; amount=0.5; canvas_dissolve=1.0; clip_to_self=MagickTrue; percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case ClearCompositeOp: case SrcCompositeOp: case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: { /* Modify canvas outside the overlaid region. 
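        These operators can clear or replace canvas pixels that the source
        never covers, so compositing must not be clipped to the overlay
        bounds (clip_to_self is turned off just below).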
*/ clip_to_self=MagickFalse; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (source_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const IndexPacket *source_indexes; register const PixelPacket *p; register IndexPacket *indexes; register PixelPacket *q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } source_indexes=GetCacheViewVirtualIndexQueue(source_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); (void) memcpy(q,p,source_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (source_indexes != (const IndexPacket *) NULL)) (void) memcpy(indexes,source_indexes, source_image->columns*sizeof(*indexes)); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); clip_to_self=MagickFalse; break; } case BlurCompositeOp: { CacheView *canvas_view, *source_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. 
        The filter defaults to a sigma of 0.5, so to match the user's input
        the ellipse size needs to be doubled.
      */
      width=height=geometry_info.rho*2.0;
      if ((flags & HeightValue) != 0 )
        height=geometry_info.sigma*2.0;
      /* default the unrotated ellipse width and height axis vectors */
      blur.x1=width;
      blur.x2=0.0;
      blur.y1=0.0;
      blur.y2=height;
      /* rotate vectors if a rotation angle is given */
      if ((flags & XValue) != 0 )
        {
          MagickRealType
            angle;

          angle=DegreesToRadians(geometry_info.xi);
          blur.x1=width*cos(angle);
          blur.x2=width*sin(angle);
          blur.y1=(-height*sin(angle));
          blur.y2=height*cos(angle);
        }
      /* otherwise, let's set an angle range and calculate in the loop */
      angle_start=0.0;
      angle_range=0.0;
      if ((flags & YValue) != 0 )
        {
          angle_start=DegreesToRadians(geometry_info.xi);
          angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
        }
      /*
        Set up a Gaussian cylindrical filter for EWA blurring.  With a
        minimum ellipse radius of support*1.0, the EWA algorithm can only
        produce a minimum blur of 0.5 for a Gaussian (support=2.0), so even
        'no blur' will still be a little blurry!  The solution (which also
        prevents any user expert filter settings from interfering) is to
        set our own filter settings here, then restore them afterwards.
      */
      resample_filter=AcquireResampleFilter(image,exception);
      SetResampleFilter(resample_filter,GaussianFilter,1.0);
      /* do the variable blurring of each pixel in the image */
      pixel=zero;
      source_view=AcquireVirtualCacheView(source_image,exception);
      canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
      for (y=0; y < (ssize_t) source_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *magick_restrict p;

        register PixelPacket
          *magick_restrict r;

        register IndexPacket
          *magick_restrict canvas_indexes;

        register ssize_t
          x;

        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
          1,exception);
        r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,
          1,exception);
        if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
          break;
        canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view);
        for (x=0; x < (ssize_t) source_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p++;
              continue;
            }
          if (fabs(angle_range) > MagickEpsilon)
            {
              MagickRealType
                angle;

              angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p);
              blur.x1=width*cos(angle);
              blur.x2=width*sin(angle);
              blur.y1=(-height*sin(angle));
              blur.y2=height*cos(angle);
            }
#if 0
          if ( x == 10 && y == 60 ) {
            fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",
              blur.x1, blur.x2, blur.y1, blur.y2);
            fprintf(stderr, "scaled by=%lf,%lf\n",
              QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p));
          }
#endif
          ScaleResampleFilter(resample_filter,
            blur.x1*QuantumScale*GetPixelRed(p),
            blur.y1*QuantumScale*GetPixelGreen(p),
            blur.x2*QuantumScale*GetPixelRed(p),
            blur.y2*QuantumScale*GetPixelGreen(p));
          (void) ResamplePixelColor(resample_filter,(double) x_offset+x,
            (double) y_offset+y,&pixel);
          SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x);
          p++;
          r++;
        }
        sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
        if (sync == MagickFalse)
          break;
      }
      resample_filter=DestroyResampleFilter(resample_filter);
      source_view=DestroyCacheView(source_view);
      canvas_view=DestroyCacheView(canvas_view);
      source_image=DestroyImage(source_image);
      source_image=canvas_image;
      break;
    }
    case DisplaceCompositeOp:
    case DistortCompositeOp:
    {
      CacheView
        *canvas_view,
        *source_view,
        *image_view;

      MagickPixelPacket
        pixel;
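      /* Displacement scaling factors, taken from "compose:args"
         (rho = x scale, sigma = y scale) or defaulted below. */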
MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket *magick_restrict canvas_indexes; register PixelPacket *magick_restrict r; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' = locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=((MagickRealType) image->columns-1)/2.0; else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) == 0) center.x=(MagickRealType) (x_offset+geometry_info.xi); else center.x=geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=((MagickRealType) image->rows-1)/2.0; else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ pixel=zero; image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } /* Displace the offset. 
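            A mid-grey map value ((QuantumRange+1)/2) produces no
            displacement; pure black and pure white displace by roughly
            -scale and +scale, relative to 'center' for Distort or to the
            current pixel for Displace.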
*/ offset.x=(double) ((horizontal_scale*(GetPixelRed(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? x : 0)); offset.y=(double) ((vertical_scale*(GetPixelGreen(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0)); status=InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale* pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p))); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0 ) { canvas_dissolve=1.0; clip_to_self=MagickTrue; } } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0) clip_to_self=MagickTrue; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. This Composition method is deprecated */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } value=GetImageArtifact(image,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) == MagickFalse ? 
MagickTrue : MagickFalse; value=GetImageArtifact(image,"compose:clip-to-self"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) != MagickFalse ? MagickTrue : MagickFalse; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsMagickTrue(value); /* Composite image. */ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateCompositeImage(image,channel,compose,source_image, x_offset,y_offset,canvas_dissolve,source_dissolve,exception); if (status != MagickFalse) return(status); #endif status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; GetMagickPixelPacket(source_image,&zero); source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, canvas, source; register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(PixelPacket *) NULL; p=(PixelPacket *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset; } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); source_indexes=GetCacheViewVirtualIndexQueue(source_view); GetMagickPixelPacket(source_image,&source); GetMagickPixelPacket(image,&canvas); hue=0.0; chroma=0.0; luma=0.0; for (x=0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } canvas.red=(MagickRealType) GetPixelRed(q); canvas.green=(MagickRealType) GetPixelGreen(q); canvas.blue=(MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) canvas.opacity=(MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) canvas.index=(MagickRealType) GetPixelIndex(indexes+x); if (image->colorspace == CMYKColorspace) { canvas.red=(MagickRealType) QuantumRange-canvas.red; canvas.green=(MagickRealType) QuantumRange-canvas.green; canvas.blue=(MagickRealType) QuantumRange-canvas.blue; canvas.index=(MagickRealType) QuantumRange-canvas.index; } /* Handle canvas modifications outside overlaid region. 
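        Depending on the operator, pixels the source does not cover are
        cleared, made fully transparent, partially dissolved, or (by
        default) taken from the source image's virtual pixels.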
*/ composite=canvas; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity=(MagickRealType) (QuantumRange-canvas_dissolve* (QuantumRange-composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&canvas,&composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity=(MagickRealType) TransparentOpacity; break; } default: { (void) GetOneVirtualMagickPixel(source_image,x-x_offset, y-y_offset,&composite,exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); q++; continue; } /* Handle normal overlay of source onto canvas. */ source.red=(MagickRealType) GetPixelRed(p); source.green=(MagickRealType) GetPixelGreen(p); source.blue=(MagickRealType) GetPixelBlue(p); if (source_image->matte != MagickFalse) source.opacity=(MagickRealType) GetPixelOpacity(p); if (source_image->colorspace == CMYKColorspace) source.index=(MagickRealType) GetPixelIndex(source_indexes+ x-x_offset); if (source_image->colorspace == CMYKColorspace) { source.red=(MagickRealType) QuantumRange-source.red; source.green=(MagickRealType) QuantumRange-source.green; source.blue=(MagickRealType) QuantumRange-source.blue; source.index=(MagickRealType) QuantumRange-source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&canvas,&composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite=source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source,source.opacity,&canvas, canvas.opacity,&composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&canvas,canvas.opacity,&source, source.opacity,&composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source,&canvas,&composite); break; } case DstInCompositeOp: { CompositeIn(&canvas,&source,&composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source,&canvas,&composite); break; } case DstOutCompositeOp: { CompositeOut(&canvas,&source,&composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source,&canvas,&composite); break; } case DstAtopCompositeOp: { CompositeAtop(&canvas,&source,&composite); break; } case XorCompositeOp: { CompositeXor(&source,&canvas,&composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { 
CompositePlus(&source,&canvas,channel,&composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source,&canvas,channel,&composite); break; } case MinusSrcCompositeOp: { CompositeMinus(&canvas,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&canvas,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&canvas,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&canvas,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&canvas,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&canvas,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&canvas,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&canvas,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&canvas,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&canvas,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&canvas,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&canvas,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&canvas,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&canvas,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&canvas,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&canvas,&composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&canvas,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&canvas,&composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source,&canvas,&composite); break; } case OverlayCompositeOp: { /* Overlay = Reversed HardLight. 
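            That is, Overlay(Sc,Dc) == HardLight(Dc,Sc), hence the swapped
            source/canvas arguments below.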
*/ CompositeHardLight(&canvas,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&canvas,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&canvas,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&canvas,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&canvas,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&canvas,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&canvas) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&canvas,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&canvas, (MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange- canvas.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&canvas, canvas_dissolve,&composite); break; } case StereoCompositeOp: { composite.red=(MagickRealType) GetPixelRed(p); composite.opacity=(composite.opacity+canvas.opacity/2); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&canvas,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&composite.red,&composite.green, &composite.blue); break; } case HueCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&sans, &luma); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans, &sans,&luma); 
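          /* luma comes from the canvas (above); hue and chroma come from
             the source (below) */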
CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue=source.blue; break; } case CopyOpacityCompositeOp: { if (source.matte == MagickFalse) composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); else composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=QuantumRange-source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels+source_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture) % % A description of each parameter follows: % % o image: the image. % % o texture: This image is the texture to layer on the background. 
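%
%  A minimal illustrative call (a sketch only; 'image' and 'texture' are
%  assumed to be valid, previously acquired images):
%
%    status=TextureImage(image,texture);
%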
% */ MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture) { #define TextureImageTag "Texture/Image" CacheView *image_view, *texture_view; ExceptionInfo *exception; Image *texture_image; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (texture == (const Image *) NULL) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); texture_image=CloneImage(texture,0,0,MagickTrue,exception); if (texture_image == (const Image *) NULL) return(MagickFalse); (void) TransformImageColorspace(texture_image,image->colorspace); (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod); status=MagickTrue; if ((image->compose != CopyCompositeOp) && ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) || (texture_image->matte != MagickFalse))) { /* Tile texture onto the image background. */ for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows) { register ssize_t x; if (status == MagickFalse) continue; for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns) { MagickBooleanType thread_status; thread_status=CompositeImage(image,image->compose,texture_image,x+ texture_image->tile_offset.x,y+texture_image->tile_offset.y); if (thread_status == MagickFalse) { status=thread_status; break; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType) image->rows,image->rows); texture_image=DestroyImage(texture_image); return(status); } /* Tile texture onto the image background (optimized). 
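    This fast path memcpy's whole texture rows across each image row; it is
    only taken when the composition is a plain copy, or an 'over' with no
    transparency involved, as tested above.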
*/ status=MagickTrue; texture_view=AcquireVirtualCacheView(texture_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,texture_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket *texture_indexes; register const PixelPacket *p; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; size_t width; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+ texture_image->tile_offset.y) % texture_image->rows, texture_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } texture_indexes=GetCacheViewVirtualIndexQueue(texture_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns) { width=texture_image->columns; if ((x+(ssize_t) width) > (ssize_t) image->columns) width=image->columns-x; (void) memcpy(q,p,width*sizeof(*p)); if ((image->colorspace == CMYKColorspace) && (texture_image->colorspace == CMYKColorspace)) { (void) memcpy(indexes,texture_indexes,width* sizeof(*indexes)); indexes+=width; } q+=width; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TextureImage) #endif proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } texture_view=DestroyCacheView(texture_view); image_view=DestroyCacheView(image_view); texture_image=DestroyImage(texture_image); return(status); }
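
/*
  Illustrative example (a sketch, not part of the library API): center an
  overlay on a canvas with the 'over' operator, using the CompositeImage()
  entry point defined above.

    (void) CompositeImage(canvas,OverCompositeOp,overlay,
      ((ssize_t) canvas->columns-(ssize_t) overlay->columns)/2,
      ((ssize_t) canvas->rows-(ssize_t) overlay->rows)/2);
*/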
GB_unaryop__identity_int64_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int64_fp32 // op(A') function: GB_tran__identity_int64_fp32 // C type: int64_t // A type: float // cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int64_t z ; GB_CAST_SIGNED(z,x,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int64_fp32 ( int64_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int64_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
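
// For reference, given the macros defined above, GB_CAST_OP (p, p) expands
// to:
//
//      float aij = Ax [p] ;
//      int64_t x ; GB_CAST_SIGNED (x, aij, 64) ;
//      Cx [p] = x ;
//
// that is, each float entry is typecast to int64_t via GB_CAST_SIGNED and
// then stored as-is, since the unary op is the identity.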
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> // HLSL Change Starts #include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant namespace hlsl { struct UnusualAnnotation; } // HLSL Change Ends namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; 
class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. 
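    // (The previous cache entry was written back to the map above, so the
    // map is stale only for the single file held in the cache.)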
Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. if (getLangOpts().ModulesHideInternalLinkage) return isVisible(Old) || New->isExternallyVisible(); return true; } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; // HLSL Change Begin // The HLSL rewriter doesn't define a default matrix pack, // so we must preserve the lack of annotations to avoid changing semantics. bool HasDefaultMatrixPack = false; // Uses of #pragma pack_matrix change the default pack. bool DefaultMatrixPackRowMajor = false; // HLSL Change End. enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. 
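  /// For example, '#pragma vtordisp(push, 2)' pushes mode 2 onto this
  /// stack and a later '#pragma vtordisp(pop)' restores the previous mode
  /// (see PragmaVtorDispKind above).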
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. 
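  /// (The list is populated lazily through
  /// ExternalSemaSource::ReadExtVectorDecls.)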
  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"

  /// \brief This represents the last location of a "#pragma clang optimize
  /// off" directive if such a directive has not been closed by an "on" yet.
  /// If optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// \brief Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// ExprNeedsCleanups - True if the current evaluation context
  /// requires cleanups to be run at its conclusion.
  bool ExprNeedsCleanups;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression. The
  /// element type here is ExprWithCleanups::Object.
  SmallVector<BlockDecl*, 8> ExprCleanupObjects;

  /// \brief Store a list of either DeclRefExprs or MemberExprs
  /// that contain a reference to a variable (constant) that may or may not
  /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
  /// and discarded value conversions have been applied to all subexpressions
  /// of the enclosing full expression. This is cleared at the end of each
  /// full expression.
  llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;

  /// \brief Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  ///
  /// This array is never empty. Clients should ignore the first
  /// element, which is used to cache a single FunctionScopeInfo
  /// that's used to parse every top-level function.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;

  /// \brief Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// \brief Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// \brief Delete-expressions to be analyzed at the end of translation unit
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with new-expression
  /// used in initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// \brief Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// \brief All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// \brief The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// \brief All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// \brief All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
  SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedExceptionSpecChecks;

  /// \brief All the members seen during a class definition which were both
  /// explicitly defaulted and had explicitly-specified exception
  /// specifications, along with the function type containing their
  /// user-specified exception specification. Those exception specifications
  /// were overridden with the default specifications, but we still need to
  /// check whether they are compatible with the default specification, and
  /// we can't do that until the nesting set of class definitions is complete.
  SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
    DelayedDefaultedMemberExceptionSpecs;

  typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
      LateParsedTemplateMapT;
  LateParsedTemplateMapT LateParsedTemplateMap;

  /// \brief Callback to the parser to parse templated functions when needed.
  typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
  typedef void LateTemplateParserCleanupCB(void *P);
  LateTemplateParserCB *LateTemplateParser;
  LateTemplateParserCleanupCB *LateTemplateParserCleanup;
  void *OpaqueParser;

  void SetLateTemplateParser(LateTemplateParserCB *LTP,
                             LateTemplateParserCleanupCB *LTPCleanup,
                             void *P) {
    LateTemplateParser = LTP;
    LateTemplateParserCleanup = LTPCleanup;
    OpaqueParser = P;
  }

  class DelayedDiagnostics;

  class DelayedDiagnosticsState {
    sema::DelayedDiagnosticPool *SavedPool;
    friend class Sema::DelayedDiagnostics;
  };
  typedef DelayedDiagnosticsState ParsingDeclState;
  typedef DelayedDiagnosticsState ProcessingContextState;

  /// A class which encapsulates the logic for delaying diagnostics
  /// during parsing and other processing.
  class DelayedDiagnostics {
    /// \brief The current pool of diagnostics into which delayed
    /// diagnostics should go.
    sema::DelayedDiagnosticPool *CurPool;

  public:
    DelayedDiagnostics() : CurPool(nullptr) {}

    /// Adds a delayed diagnostic.
    void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

    /// Determines whether diagnostics should be delayed.
    bool shouldDelayDiagnostics() { return CurPool != nullptr; }

    /// Returns the current delayed-diagnostics pool.
    sema::DelayedDiagnosticPool *getCurrentPool() const {
      return CurPool;
    }

    /// Enter a new scope. Access and deprecation diagnostics will be
    /// collected in this pool.
    DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = &pool;
      return state;
    }

    /// Leave a delayed-diagnostic state that was previously pushed.
    /// Do not emit any of the diagnostics. This is performed as part
    /// of the bookkeeping of popping a pool "properly".
    void popWithoutEmitting(DelayedDiagnosticsState state) {
      CurPool = state.SavedPool;
    }

    /// Enter a new scope where access and deprecation diagnostics are
    /// not delayed.
    DelayedDiagnosticsState pushUndelayed() {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = nullptr;
      return state;
    }

    /// Undo a previous pushUndelayed().
    void popUndelayed(DelayedDiagnosticsState state) {
      assert(CurPool == nullptr);
      CurPool = state.SavedPool;
    }
  } DelayedDiagnostics;

  /// A RAII object to temporarily push a declaration context.
  class ContextRAII {
  private:
    Sema &S;
    DeclContext *SavedContext;
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;

  public:
    ContextRAII(Sema &S, DeclContext *ContextToPush,
                bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
    {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
    }

    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      SavedContext = nullptr;
    }

    ~ContextRAII() {
      pop();
    }
  };
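  // Illustrative sketch (not part of the original header): ContextRAII above
  // is typically used to do work inside another declaration context and have
  // the previous context restored automatically. The function name and 'RD'
  // are hypothetical.
  //
  // \code
  //   void checkInsideRecord(Sema &S, CXXRecordDecl *RD) {
  //     Sema::ContextRAII SavedContext(S, RD); // S.CurContext is now RD.
  //     // ... perform lookups or build declarations in RD's context ...
  //   } // Destructor pops back to the previous DeclContext.
  // \endcode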
  /// \brief RAII object to handle the state changes required to synthesize
  /// a function body.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC)
    {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
    }

    ~SynthesizedFunctionScope() {
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma weak before declared. rare. may alias another
  /// identifier, declared or undeclared
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before declared. Used in Solaris system
  /// headers to define functions that occur in multiple standards to call
  /// the version in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// \brief Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*,2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// \brief The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// \brief The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// \brief The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// \brief Caches identifiers/selectors for NSFoundation APIs.
  // std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change

  /// \brief The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// \brief The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// \brief Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// \brief Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// \brief The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// \brief The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// \brief Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// \brief The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// \brief The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// \brief The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// \brief The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// \brief The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// \brief id<NSCopying> type.
  QualType QIDNSCopying;

  /// \brief will hold 'respondsToSelector:'
  Selector RespondsToSelectorSel;

  /// \brief counter for internal MS Asm label names.
  unsigned MSAsmLabelNameCounter;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields. This is really a
  bool AllowAbstractFieldReference;

  /// \brief Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum ExpressionEvaluationContext {
    /// \brief The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// \brief The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// \brief The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// \brief The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// \brief The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// \brief Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// \brief The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// \brief Whether the enclosing context needed a cleanup.
    bool ParentNeedsCleanups;

    /// \brief Whether we are in a decltype expression.
    bool IsDecltype;

    /// \brief The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// \brief The number of typos encountered during this expression
    /// evaluation context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

    /// \brief The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// \brief The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// \brief The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
    IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;

    /// \brief If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return
    /// type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// \brief If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      bool ParentNeedsCleanups,
                                      Decl *ManglingContextDecl,
                                      bool IsDecltype)
      : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
        IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
        NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }

    /// \brief Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
    MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

    bool isUnevaluated() const {
      return Context == Unevaluated || Context == UnevaluatedAbstract;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// \brief Compute the mangling number context for a lambda expression or
  /// block literal.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
  /// associated with the context, if relevant.
  MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC,
    Decl *&ManglingContextDecl);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
    {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// \brief A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;

  /// \brief The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// \brief The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;
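  // Illustrative sketch (not part of the original header): how the expression
  // evaluation context stack above is used around an unevaluated operand such
  // as the operand of sizeof. The function name and 'E' are hypothetical;
  // Push/PopExpressionEvaluationContext are used exactly as in
  // SynthesizedFunctionScope earlier in this file.
  //
  // \code
  //   ExprResult analyzeSizeofOperand(Sema &S, Expr *E) {
  //     S.PushExpressionEvaluationContext(Sema::Unevaluated);
  //     // ... analyze E; no code will be generated to evaluate it ...
  //     S.PopExpressionEvaluationContext();
  //     return E;
  //   }
  // \endcode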
  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// \brief A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  void ReadMethodPool(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema& S)
      : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
    ~FPContractStateRAII() {
      S.FPFeatures.fp_contract = OldFPContractState;
    }
  private:
    Sema& S;
    bool OldFPContractState : 1;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();
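  // Illustrative sketch (not part of the original header): FPContractStateRAII
  // above saves and restores the FP_CONTRACT setting around a compound
  // statement. The surrounding function is hypothetical.
  //
  // \code
  //   void actOnCompoundStatement(Sema &S) {
  //     Sema::FPContractStateRAII SaveFP(S);
  //     // ... a '#pragma STDC FP_CONTRACT' inside the block may change
  //     // S.FPFeatures.fp_contract; the destructor restores the old value.
  //   }
  // \endcode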
  /// \brief Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  ///\brief Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// \brief Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef,
                          unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// \brief Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// \brief Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// \brief Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// \brief Get a string to suggest for zero-initialization of a type.
  std::string
  getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// \brief Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
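  // Illustrative sketch (not part of the original header): emitting a
  // diagnostic through the SemaDiagnosticBuilder wrapper above. The function
  // name is hypothetical, and the diagnostic ID assumes the usual
  // diag::warn_decl_shadow from Clang's diagnostic tables.
  //
  // \code
  //   void warnShadowedDecl(Sema &S, SourceLocation Loc, NamedDecl *Old) {
  //     // operator<< forwards to DiagnosticBuilder; the destructor of the
  //     // temporary dispatches to Sema::EmitCurrentDiagnostic.
  //     S.Diag(Loc, diag::warn_decl_shadow) << Old->getDeclName();
  //   }
  // \endcode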
  /// \brief Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  void ActOnEndOfTranslationUnit();

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// \brief This is used to inform Sema what the current
  /// TemplateParameterDepth is during Parsing. Currently it is used to pass
  /// on the depth when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD,
                               CapturedRegionKind K);
  void
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       const BlockExpr *blkExpr = nullptr);

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.back();
  }

  sema::FunctionScopeInfo *getEnclosingFunction() const {
    if (FunctionScopes.empty())
      return nullptr;

    for (int e = FunctionScopes.size()-1; e >= 0; --e) {
      if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
        continue;
      return FunctionScopes[e];
    }
    return nullptr;
  }

  template <typename ExprT>
  void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
    if (!isUnevaluatedContext())
      getCurFunction()->recordUseOfWeak(E, IsRead);
  }

  void PushCompoundScope();
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// \brief Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// \brief Retrieve the current lambda scope info, if any.
  sema::LambdaScopeInfo *getCurLambda();

  /// \brief Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// \brief Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  void ActOnComment(SourceRange Comment);

  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
  //

  QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                              const DeclSpec *DS = nullptr);
  QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                              const DeclSpec *DS = nullptr);
  QualType BuildPointerType(QualType T,
                            SourceLocation Loc, DeclarationName Entity);
  QualType BuildReferenceType(QualType T, bool LValueRef,
                              SourceLocation Loc, DeclarationName Entity);
  QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                          Expr *ArraySize, unsigned Quals,
                          SourceRange Brackets, DeclarationName Entity);
  QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                              SourceLocation AttrLoc);

  bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

  unsigned deduceWeakPropertyFromType(QualType T) {
    if ((getLangOpts().getGC() != LangOptions::NonGC &&
         T.isObjCGCWeak()) ||
        (getLangOpts().ObjCAutoRefCount &&
         T.getObjCLifetime() == Qualifiers::OCL_Weak))
      return ObjCDeclSpec::DQ_PR_weak;
    return 0;
  }
  /// \brief Build a function type.
  ///
  /// This routine checks the function type according to C++ rules and
  /// under the assumption that the result type and parameter types have
  /// just been instantiated from a template. It therefore duplicates
  /// some of the behavior of GetTypeForDeclarator, but in a much
  /// simpler form that is only suitable for this narrow use case.
  ///
  /// \param T The return type of the function.
  ///
  /// \param ParamTypes The parameter types of the function. This array
  /// will be modified to account for adjustments to the types of the
  /// function parameters.
  ///
  /// \param Loc The location of the entity whose type involves this
  /// function type or, if there is no such entity, the location of the
  /// type that will have function type.
  ///
  /// \param Entity The name of the entity that involves the function
  /// type, if known.
  ///
  /// \param EPI Extra information about the function type. Usually this will
  /// be taken from an existing function with the same prototype.
  ///
  /// \returns A suitable function type, if there are no errors. The
  /// unqualified type will always be a FunctionProtoType.
  /// Otherwise, returns a NULL type.
  QualType BuildFunctionType(QualType T,
                             MutableArrayRef<QualType> ParamTypes,
                             SourceLocation Loc, DeclarationName Entity,
                             const FunctionProtoType::ExtProtoInfo &EPI);

  QualType BuildMemberPointerType(QualType T, QualType Class,
                                  SourceLocation Loc,
                                  DeclarationName Entity);
  QualType BuildBlockPointerType(QualType T,
                                 SourceLocation Loc, DeclarationName Entity);
  QualType BuildParenType(QualType T);
  QualType BuildAtomicType(QualType T, SourceLocation Loc);

  TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
  TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
  TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
                                               TypeSourceInfo *ReturnTypeInfo);

  /// \brief Package the given type and TSI into a ParsedType.
  ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
  DeclarationNameInfo GetNameForDeclarator(Declarator &D);
  DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
  static QualType GetTypeFromParser(ParsedType Ty,
                                    TypeSourceInfo **TInfo = nullptr);
  CanThrowResult canThrow(const Expr *E);
  const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                                const FunctionProtoType *FPT);
  void UpdateExceptionSpec(FunctionDecl *FD,
                           const FunctionProtoType::ExceptionSpecInfo &ESI);
  bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
  bool CheckDistantExceptionSpec(QualType T);
  bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
  bool CheckEquivalentExceptionSpec(
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc);
  bool CheckEquivalentExceptionSpec(
      const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc,
      bool *MissingExceptionSpecification = nullptr,
      bool *MissingEmptyExceptionSpecification = nullptr,
      bool AllowNoexceptAllMatchWithNoSpec = false,
      bool IsOperatorNew = false);
  bool CheckExceptionSpecSubset(
      const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
      const FunctionProtoType *Superset, SourceLocation SuperLoc,
      const FunctionProtoType *Subset, SourceLocation SubLoc);
  bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
      const FunctionProtoType *Target, SourceLocation TargetLoc,
      const FunctionProtoType *Source, SourceLocation SourceLoc);

  TypeResult ActOnTypeName(Scope *S, Declarator &D);

  /// \brief The parser has parsed the context-sensitive type 'instancetype'
  /// in an Objective-C message declaration. Return the appropriate type.
  ParsedType ActOnObjCInstanceType(SourceLocation Loc);
  /// \brief Abstract class used to diagnose incomplete types.
  struct TypeDiagnoser {
    bool Suppressed;

    TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }

    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
    virtual ~TypeDiagnoser() {}
  };

  static int getPrintable(int I) { return I; }
  static unsigned getPrintable(unsigned I) { return I; }
  static bool getPrintable(bool B) { return B; }
  static const char * getPrintable(const char *S) { return S; }
  static StringRef getPrintable(StringRef S) { return S; }
  static const std::string &getPrintable(const std::string &S) { return S; }
  static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
    return II;
  }
  static DeclarationName getPrintable(DeclarationName N) { return N; }
  static QualType getPrintable(QualType T) { return T; }
  static SourceRange getPrintable(SourceRange R) { return R; }
  static SourceRange getPrintable(SourceLocation L) { return L; }
  static SourceRange getPrintable(const Expr *E) {
    return E->getSourceRange();
  }
  static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

  template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
    unsigned DiagID;
    std::tuple<const Ts &...> Args;

    template <std::size_t... Is>
    void emit(const SemaDiagnosticBuilder &DB,
              llvm::index_sequence<Is...>) const {
      // Apply all tuple elements to the builder in order.
      bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
      (void)Dummy;
    }

  public:
    BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
        : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      if (Suppressed)
        return;
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, llvm::index_sequence_for<Ts...>());
      DB << T;
    }
  };

private:
  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               TypeDiagnoser &Diagnoser);

  VisibleModuleSet VisibleModules;
  llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;

  Module *CachedFakeTopLevelModule;

public:
  /// \brief Get the module owning an entity.
  Module *getOwningModule(Decl *Entity);

  /// \brief Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);

  bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return !D->isHidden() || isVisibleSlow(D);
  }
  bool hasVisibleMergedDefinition(NamedDecl *Def);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool
  hasVisibleDefaultArgument(const NamedDecl *D,
                            llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           unsigned DiagID);
  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, Diagnoser);
  }

  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
  //

  /// List of decls defined in a function prototype. This contains
  /// EnumConstants that incorrectly end up in translation unit scope because
  /// there is no function to pin them on. ActOnFunctionDeclarator reads this
  /// list and patches them into the FunctionDecl.
  std::vector<NamedDecl*> DeclsInPrototypeScope;

  DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

  void DiagnoseUseOfUnimplementedSelectors();

  bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

  ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                         Scope *S, CXXScopeSpec *SS = nullptr,
                         bool isClassName = false,
                         bool HasTrailingDot = false,
                         ParsedType ObjectType = ParsedType(),
                         bool IsCtorOrDtorName = false,
                         bool WantNontrivialTypeSourceInfo = false,
                         IdentifierInfo **CorrectedII = nullptr);
  TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
  bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
  void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                               SourceLocation IILoc,
                               Scope *S,
                               CXXScopeSpec *SS,
                               ParsedType &SuggestedType,
                               bool AllowClassTemplates = false);

  /// \brief For compatibility with MSVC, we delay parsing of some default
  /// template type arguments until instantiation time. Emits a warning and
  /// returns a synthesized DependentNameType that isn't really dependent on
  /// any other template arguments.
  ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
                                            SourceLocation NameLoc);
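  // Illustrative sketch (not part of the original header): the variadic
  // RequireCompleteType overload above packages extra arguments into a
  // BoundTypeDiagnoser, which streams them into the diagnostic before the
  // incomplete type itself. The surrounding function is hypothetical, and
  // the diagnostic ID assumes Clang's usual diag::err_incomplete_member_access.
  //
  // \code
  //   bool checkMemberAccessBase(Sema &S, SourceLocation Loc, QualType BaseTy,
  //                              Expr *BaseExpr) {
  //     // getPrintable(const Expr *) turns BaseExpr into a SourceRange
  //     // argument for the diagnostic.
  //     return S.RequireCompleteType(Loc, BaseTy,
  //                                  diag::err_incomplete_member_access,
  //                                  BaseExpr);
  //   }
  // \endcode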
  /// \brief Describes the result of the name lookup and resolution performed
  /// by \c ClassifyName().
  enum NameClassificationKind {
    NC_Unknown,
    NC_Error,
    NC_Keyword,
    NC_Type,
    NC_Expression,
    NC_NestedNameSpecifier,
    NC_TypeTemplate,
    NC_VarTemplate,
    NC_FunctionTemplate
  };

  class NameClassification {
    NameClassificationKind Kind;
    ExprResult Expr;
    TemplateName Template;
    ParsedType Type;

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *) : Kind(NC_Keyword) { }

    static NameClassification Error() {
      return NameClassification(NC_Error);
    }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification NestedNameSpecifier() {
      return NameClassification(NC_NestedNameSpecifier);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    ExprResult getExpression() const {
      assert(Kind == NC_Expression);
      return Expr;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate);
      return Template;
    }

    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };
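  // Illustrative sketch (not part of the original header): how a caller in
  // the parser might branch on a NameClassification result. The function and
  // the consume* helpers are hypothetical.
  //
  // \code
  //   void handleClassifiedName(const Sema::NameClassification &Result) {
  //     switch (Result.getKind()) {
  //     case Sema::NC_Type:
  //       consumeType(Result.getType());       // hypothetical helper
  //       break;
  //     case Sema::NC_Expression:
  //       consumeExpr(Result.getExpression()); // hypothetical helper
  //       break;
  //     default:
  //       break;
  //     }
  //   }
  // \endcode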
  /// \brief Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  ///        address of ('&') expression, assuming it is classified as an
  ///        expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification
  ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
               SourceLocation NameLoc, const Token &NextToken,
               bool IsAddressOfOperand,
               std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name,
                                    SourceLocation Loc);
  void
  diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                            SourceLocation FallbackLoc,
                            SourceLocation ConstQualLoc = SourceLocation(),
                            SourceLocation VolatileQualLoc = SourceLocation(),
                            SourceLocation RestrictQualLoc = SourceLocation(),
                            SourceLocation AtomicQualLoc = SourceLocation());

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
  void CheckShadow(Scope *S, VarDecl *D);
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                  TypedefNameDecl *D,
                                  LookupResult &Previous, bool &Redeclaration);

  // HLSL Change Starts
  // This enumeration is used to determine whether a variable declaration
  // should shadow a prior declaration rather than merging.
  enum ShadowMergeState {
    ShadowMergeState_Disallowed, // shadowing is not allowed
    ShadowMergeState_Possible,   // shadowing is possible (but may not occur)
    ShadowMergeState_Effective   // the declaration should shadow a prior one
  };
  // HLSL Change Ends

  NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope,
                                     ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
  // Returns true if the variable declaration is a redeclaration
  bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous,
                                ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
  void CheckVariableDeclarationType(VarDecl *NewVD);
  void CheckCompleteVariableDeclaration(VarDecl *var);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

  bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
  bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);

  // Returns true if the function declaration is a redeclaration
  bool CheckFunctionDeclaration(Scope *S,
                                FunctionDecl *NewFD, LookupResult &Previous,
                                bool IsExplicitSpecialization);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                          SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC,
                              hlsl::ParameterModifier ParamMod); // HLSL Change
  void ActOnParamDefaultArgument(Decl *param,
                                 SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param,
                                         SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
                            bool TypeMayContainAuto);
  void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
  void ActOnInitializerError(Decl *Dcl);
  void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
  void ActOnCXXForRangeDecl(Decl *D);
  StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                        IdentifierInfo *Ident,
                                        ParsedAttributes &Attrs,
                                        SourceLocation AttrEnd);
  void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
  void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
  void FinalizeDeclaration(Decl *D);
  DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                         ArrayRef<Decl *> Group);
  DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
                                      bool TypeMayContainAuto = true);
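  // Illustrative sketch (not part of the original header): threading the HLSL
  // ShadowMergeState above through variable declaration processing. The
  // caller is hypothetical; the ActOnVariableDeclarator signature is the one
  // declared above.
  //
  // \code
  //   NamedDecl *declareHLSLVariable(Sema &S, Scope *Sc, Declarator &D,
  //                                  DeclContext *DC, TypeSourceInfo *TInfo,
  //                                  LookupResult &Previous,
  //                                  MultiTemplateParamsArg TemplateParams) {
  //     bool AddToScope = true;
  //     // Allow the new declaration to shadow a prior one instead of merging.
  //     return S.ActOnVariableDeclarator(Sc, D, DC, TInfo, Previous,
  //                                      TemplateParams, AddToScope,
  //                                      Sema::ShadowMergeState_Possible);
  //   }
  // \endcode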
  /// Should be called on all declarations that might have attached
  /// documentation comments.
  void ActOnDocumentableDecl(Decl *D);
  void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

  void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                       SourceLocation LocAfterDecls);
  void CheckForFunctionRedefinition(FunctionDecl *FD,
                                    const FunctionDecl *EffectiveDefinition =
                                        nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
  Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
  void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
  bool isObjCMethodDecl(Decl *D) {
    return D && isa<ObjCMethodDecl>(D);
  }

  /// \brief Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about emitting
  /// code for that function.
  ///
  /// This will be \c false if we may need the body of the function in the
  /// middle of parsing an expression (where it's impractical to switch to
  /// parsing a different function), for instance, if it's constexpr in C++11
  /// or has an 'auto' return type in C++14. These cases are essentially bugs.
  bool canDelayFunctionBody(const Declarator &D);

  /// \brief Determine whether we can skip parsing the body of a function
  /// definition, assuming we don't care about analyzing its body or emitting
  /// code for that function.
  ///
  /// This will be \c false only if we may need the body of the function in
  /// order to parse the rest of the program (for instance, if it is
  /// \c constexpr in C++11 or has an 'auto' return type in C++14).
  bool canSkipFunctionBody(Decl *D);

  void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
  Decl *ActOnSkippedFunctionBody(Decl *Decl);
  void ActOnFinishInlineMethodDef(CXXMethodDecl *D);

  /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
  /// attribute for which parsing is delayed.
  void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

  /// \brief Diagnose any unused parameters in the given sequence of
  /// ParmVarDecl pointers.
  void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
                                ParmVarDecl * const *End);

  /// \brief Diagnose whether the size of parameters or return value of a
  /// function or obj-c method definition is pass-by-value and larger than a
  /// specified threshold.
  void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
                                              ParmVarDecl * const *End,
                                              QualType ReturnTy,
                                              NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr,
                              SourceLocation AsmLoc,
                              SourceLocation RParenLoc);

  /// \brief Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S,
                              AttributeList *AttrList,
                              SourceLocation SemiLoc);

  /// \brief The parser has processed a module import declaration.
  ///
  /// \param AtLoc The location of the '@' symbol, if any.
  ///
  /// \param ImportLoc The location of the 'import' keyword.
  ///
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                               ModuleIdPath Path);

  /// \brief The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument
  };

  /// \brief Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             bool NeedDefinition, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  /// \brief Retrieve a suitable printing policy.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// \brief Retrieve a suitable printing policy.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);

  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                   DeclSpec &DS);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                   DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation = false);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                    AccessSpecifier AS,
                                    RecordDecl *Record,
                                    const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                       RecordDecl *Record);

  bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                    TagTypeKind NewTag, bool isDefinition,
                                    SourceLocation NewTagLoc,
                                    const IdentifierInfo *Name);

  enum TagUseKind {
    TUK_Reference,   // Reference to a tag:  'struct foo *X;'
    TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
    TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
    TUK_Friend       // Friend declaration:  'friend struct foo;'
  };

  struct SkipBodyInfo {
    SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
    bool ShouldSkip;
    NamedDecl *Previous;
  };

  Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                 SourceLocation KWLoc, CXXScopeSpec &SS,
                 IdentifierInfo *Name, SourceLocation NameLoc,
                 AttributeList *Attr, AccessSpecifier AS,
                 SourceLocation ModulePrivateLoc,
                 MultiTemplateParamsArg TemplateParameterLists,
                 bool &OwnedDecl, bool &IsDependent,
                 SourceLocation ScopedEnumKWLoc,
                 bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
                 bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);

  Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                                unsigned TagSpec, SourceLocation TagLoc,
                                CXXScopeSpec &SS,
                                IdentifierInfo *Name, SourceLocation NameLoc,
                                AttributeList *Attr,
                                MultiTemplateParamsArg TempParamLists);

  TypeResult ActOnDependentTag(Scope *S,
                               unsigned TagSpec,
                               TagUseKind TUK,
                               const CXXScopeSpec &SS,
                               IdentifierInfo *Name,
                               SourceLocation TagLoc,
                               SourceLocation NameLoc);

  void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 IdentifierInfo *ClassName,
                 SmallVectorImpl<Decl *> &Decls);
  Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                   Declarator &D, Expr *BitfieldWidth);
  FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                         Declarator &D, Expr *BitfieldWidth,
                         InClassInitStyle InitStyle,
                         AccessSpecifier AS);
  MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                   SourceLocation DeclStart,
                                   Declarator &D, Expr *BitfieldWidth,
                                   InClassInitStyle InitStyle,
                                   AccessSpecifier AS,
                                   AttributeList *MSPropertyAttr);

  FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                            TypeSourceInfo *TInfo,
                            RecordDecl *Record, SourceLocation Loc,
                            bool Mutable, Expr *BitfieldWidth,
                            InClassInitStyle InitStyle,
                            SourceLocation TSSL,
                            AccessSpecifier AS, NamedDecl *PrevDecl,
                            Declarator *D = nullptr);

  bool CheckNontrivialField(FieldDecl *FD);
  void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
  bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                              bool Diagnose = false);
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
  void ActOnLastBitfield(SourceLocation DeclStart,
                         SmallVectorImpl<Decl *> &AllIvarDecls);
  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                  Declarator &D, Expr *BitfieldWidth,
                  tok::ObjCKeywordKind visibility);

  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields,
                   SourceLocation LBrac, SourceLocation RBrac,
                   AttributeList *AttrList);

  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  typedef void *SkippedDefinitionContext;

  /// \brief Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
  void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                       SourceLocation FinalLoc,
                                       bool IsFinalSpelledSealed,
                                       SourceLocation LBraceLoc);

  /// ActOnTagFinishDefinition - Invoked once we have finished parsing
  /// the definition of a tag (enumeration, class, struct, or union).
  void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                                SourceLocation RBraceLoc);

  void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

  void ActOnObjCContainerFinishDefinition();

  /// \brief Invoked when we must temporarily exit the objective-c container
  /// scope for parsing/looking-up C constructs.
  ///
  /// Must be followed by a call to \see ActOnObjCReenterContainerContext
  void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
  void ActOnObjCReenterContainerContext(DeclContext *DC);

  /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
  /// error parsing the definition of a tag.
  void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

  EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                      EnumConstantDecl *LastEnumConst,
                                      SourceLocation IdLoc,
                                      IdentifierInfo *Id,
                                      Expr *val);
  bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
  bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                              QualType EnumUnderlyingTy,
                              const EnumDecl *Prev);

  /// Determine whether the body of an anonymous enumeration should be skipped.
  /// \param II The name of the first enumerator.
  SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                      SourceLocation IILoc);

  Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                          SourceLocation IdLoc, IdentifierInfo *Id,
                          AttributeList *Attrs,
                          SourceLocation EqualLoc, Expr *Val);
  void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
                     SourceLocation RBraceLoc, Decl *EnumDecl,
                     ArrayRef<Decl *> Elements,
                     Scope *S, AttributeList *Attr);

  DeclContext *getContainingDC(DeclContext *DC);

  /// Set the current declaration context until it gets popped.
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  void ExitDeclaratorContext(Scope *S);

  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope* S, Decl* D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null. If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// \brief Make the given externally-produced declaration visible at the
  /// top level scope.
  ///
  /// \param D The externally-produced declaration to push.
  ///
  /// \param Name The name of the externally-produced declaration.
  void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope
  /// returns true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in
  ///        the enclosing namespace set of the context, rather than contained
  ///        directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope. Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction
};

OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);

/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);

ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion);

bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType);

bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr);

void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType);

void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr      ///< Constant expression in a noptr-new-declarator.
}; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. 
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;

void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate.
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());

// Emit as a series of 'note's all template and non-template candidates
// identified by the expression Expr.
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());

/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false);

// [PossiblyAFunctionType]  -->   [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr);

FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr);

bool ResolveAndFixSingleFunctionTemplateSpecialization(ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
  BEF_begin,
  BEF_end
};

ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr);

ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection = true);

bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result);

ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx);

ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc);
ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc);

ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
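// A minimal usage sketch (not from the original header), assuming a Sema &S
// and a FunctionDecl *FD whose definition is being entered: the parameter
// list might be validated with the helper declared below:
//
//   S.CheckParmsForFunctionDef(FD->param_begin(), FD->param_end(),
//                              /*CheckParameterNames=*/true);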
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);

/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{

/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// \brief Look up any declaration with any name.
  LookupAnyName
};

/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// \brief The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// \brief The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists.
  ForRedeclaration
};

/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// \brief The lookup resulted in an error.
  LOLR_Error,
  /// \brief The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// \brief The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
  TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};

/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC);

/// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
///
/// The boolean value will be true to indicate that the namespace was loaded
/// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// \brief Clears the state of the given TypoExpr.
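// Illustrative sketch (not from the original header): a delayed typo created
// via createDelayedTypo() is tracked in DelayedTypos until the enclosing
// expression is rebuilt; a consumer might then run, roughly,
//
//   const TypoExprState &State = S.getTypoExprState(TE);
//   State.DiagHandler(Correction); // 'Correction' is a hypothetical
//                                  // TypoCorrection chosen for TE
//   S.clearDelayedTypo(TE);        // declared below; drops TE's state
//
// where 'S' is the Sema instance and 'TE' the pending TypoExpr.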
void clearDelayedTypo(TypoExpr *TE);

/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration);

bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate);
bool isKnownName(StringRef name);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non-error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
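// A minimal sketch (not from the original header), assuming a Sema &S, the
// current Scope *CurScope, and a Declarator D that has just produced a
// declaration: parsed attributes are funneled through this dispatcher,
// conceptually,
//
//   Decl *New = /* result of an ActOn... declaration callback */ nullptr;
//   S.ProcessDeclAttributes(CurScope, New, D);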
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling);

void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive);

/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl = false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance);

public:
/// \brief - Returns instance or factory methods in the global method pool for
/// the given selector. If no such method exists, or only one method is found,
/// the function returns false; otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass);

private:
/// \brief - Returns a selector which best matches the given argument list or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance);

/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType = QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() { return E; }
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change

StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// \brief A RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S) : S(S) {
    S.ActOnStartOfCompoundStmt();
  }
  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }
private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
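// A minimal usage sketch (not from the original header), assuming a Sema &S
// inside a callback that pushed a function scope; 'SomeErrorPath' is a
// hypothetical condition:
//
//   Sema::FunctionScopeRAII FuncScope(S);
//   if (SomeErrorPath)
//     return StmtError();   // the function scope is popped automatically
//   FuncScope.disable();    // success: a later callback will pop it instead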
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope 
*S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
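// A balanced push/pop sketch (not from the original header), assuming a
// Sema &S: many of the callbacks in this section run inside an expression
// evaluation context that must be entered and exited in pairs, e.g. around
// an unevaluated operand:
//
//   S.PushExpressionEvaluationContext(Sema::Unevaluated);
//   ExprResult Operand = /* parse and build the operand */ ExprError();
//   S.PopExpressionEvaluationContext();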
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc);

void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);

void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
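// For illustration (not from the original header): a pure "can this variable
// be captured?" query that performs no mutation, assuming a Sema &S, a
// VarDecl *Var, and a SourceLocation Loc, could look like
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);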
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
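// A minimal sketch (not from the original header), assuming a Sema &S and
// parser state (CurScope, SS, TemplateKWLoc, Id): when the parser reaches an
// identifier in expression position it typically funnels it through
// ActOnIdExpression below, roughly:
//
//   ExprResult Res = S.ActOnIdExpression(CurScope, SS, TemplateKWLoc, Id,
//                                        /*HasTrailingLParen=*/false,
//                                        /*IsAddressOfOperand=*/false);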
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr( CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult 
ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); // HLSL Change Begins bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation Loc, UnaryExprOrTypeTrait ExprKind); // HLSL Change Ends bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation Loc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  void ActOnStmtExprError();

  // __builtin_offsetof(type, identifier(.identifier|[expr])*)
  struct OffsetOfComponent {
    SourceLocation LocStart, LocEnd;
    bool isBrackets;  // true if [expr], false if .ident
    union {
      IdentifierInfo *IdentInfo;
      Expr *E;
    } U;
  };

  /// __builtin_offsetof(type, a.b[123][456].c)
  ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                  TypeSourceInfo *TInfo,
                                  OffsetOfComponent *CompPtr,
                                  unsigned NumComponents,
                                  SourceLocation RParenLoc);
  ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation TypeLoc,
                                  ParsedType ParsedArgTy,
                                  OffsetOfComponent *CompPtr,
                                  unsigned NumComponents,
                                  SourceLocation RParenLoc);

  // __builtin_choose_expr(constExpr, expr1, expr2)
  ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                             Expr *CondExpr, Expr *LHSExpr,
                             Expr *RHSExpr, SourceLocation RPLoc);

  // __builtin_va_arg(expr, type)
  ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                        SourceLocation RPLoc);
  ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                            TypeSourceInfo *TInfo, SourceLocation RPLoc);

  // __null
  ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

  bool CheckCaseExpression(Expr *E);
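  // Illustrative sketch (editorial; 'S', 'OpLoc', 'LHS' and 'RHS' are
  // hypothetical placeholders): the expression entry points in this section
  // are layered. The parser-facing ActOn* form resolves token-level details
  // and forwards to the Build* form, which performs the semantic checks;
  // CreateBuiltin* is the lowest layer. A caller holding two expressions
  // could build 'LHS + RHS' roughly as:
  //
  //   ExprResult Sum = S.BuildBinOp(/*Scope=*/nullptr, OpLoc, BO_Add,
  //                                 LHS, RHS);
  //   if (Sum.isInvalid()) { /* diagnostics were already emitted */ }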
  /// \brief Describes the result of an "if-exists" condition check.
  enum IfExistsResult {
    /// \brief The symbol exists.
    IER_Exists,

    /// \brief The symbol does not exist.
    IER_DoesNotExist,

    /// \brief The name is a dependent name, so the results will differ
    /// from one instantiation to the next.
    IER_Dependent,

    /// \brief An error occurred.
    IER_Error
  };

  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                               const DeclarationNameInfo &TargetNameInfo);

  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
                               bool IsIfExists, CXXScopeSpec &SS,
                               UnqualifiedId &Name);

  StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        NestedNameSpecifierLoc QualifierLoc,
                                        DeclarationNameInfo NameInfo,
                                        Stmt *Nested);
  StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        CXXScopeSpec &SS, UnqualifiedId &Name,
                                        Stmt *Nested);

  //===------------------------- "Block" Extension ------------------------===//

  /// ActOnBlockStart - This callback is invoked when a block literal is
  /// started.
  void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockArguments - This callback allows processing of block arguments.
  /// If there are no arguments, this is still invoked.
  void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                           Scope *CurScope);

  /// ActOnBlockError - If there is an error parsing a block, this callback
  /// is invoked to pop the information about the block from the action impl.
  void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockStmtExpr - This is called when the body of a block statement
  /// literal was successfully completed. ^(int x){...}
  ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                                Scope *CurScope);

  //===---------------------------- Clang Extensions ----------------------===//

  /// __builtin_convertvector(...)
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  // HLSL Change Starts
  //===---------------------------- HLSL Features -------------------------===//
  /// cbuffer/tbuffer
  llvm::SmallVector<Decl*, 1> HLSLBuffers;
  Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer,
                             SourceLocation KwLoc, IdentifierInfo *Ident,
                             SourceLocation IdentLoc,
                             std::vector<hlsl::UnusualAnnotation *>& BufferAttributes,
                             SourceLocation LBrace);
  void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
  Decl* getActiveHLSLBuffer() const;
  void ActOnStartHLSLBufferView();
  bool IsOnHLSLBufferView();
  Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
                            DeclGroupPtrTy &dcl, bool iscbuf);
  // HLSL Change Ends

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               AttributeList *AttrList);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  CXXRecordDecl *getStdBadAlloc() const;

  /// \brief Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// \brief Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. 
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// \brief Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(ComputedEST != EST_ComputedNoexcept &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// \brief The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// \brief The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// \brief Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// \brief Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// \brief Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains "any"
        ESI.Type = EST_ComputedNoexcept;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// \brief Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted move
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted move
  /// assignment operator of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
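  // Illustrative sketch (editorial; 'SubobjectCtors' and 'Loc' are
  // hypothetical placeholders): an implicit exception specification is
  // accumulated by feeding in every special member the implicit definition
  // would call, then reading back the combined result:
  //
  //   ImplicitExceptionSpecification ExceptSpec(*this);
  //   for (const CXXMethodDecl *Ctor : SubobjectCtors)
  //     ExceptSpec.CalledDecl(Loc, Ctor);
  //   FunctionProtoType::ExceptionSpecInfo ESI = ExceptSpec.getExceptionSpec();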
  /// \brief Determine what sort of exception specification a defaulted
  /// destructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification an inheriting
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);

  /// \brief Evaluate the implicit exception specification for a defaulted
  /// special member function.
  void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

  /// \brief Check the given exception-specification and update the
  /// exception specification information with the results.
  void checkExceptionSpecification(bool IsTopLevel,
                                   ExceptionSpecificationType EST,
                                   ArrayRef<ParsedType> DynamicExceptions,
                                   ArrayRef<SourceRange> DynamicExceptionRanges,
                                   Expr *NoexceptExpr,
                                   SmallVectorImpl<QualType> &Exceptions,
                                   FunctionProtoType::ExceptionSpecInfo &ESI);

  /// \brief Determine if we're in a case where we need to (incorrectly) eagerly
  /// parse an exception specification to work around a libstdc++ bug.
  bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

  /// \brief Add an exception-specification to the given member function
  /// (or member function template). The exception-specification was parsed
  /// after the method itself was declared.
  void actOnDelayedExceptionSpecification(Decl *Method,
                                 ExceptionSpecificationType EST,
                                 SourceRange SpecificationRange,
                                 ArrayRef<ParsedType> DynamicExceptions,
                                 ArrayRef<SourceRange> DynamicExceptionRanges,
                                 Expr *NoexceptExpr);

  /// \brief Determine if a special member function should have a deleted
  /// definition when it is defaulted.
  bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                 bool Diagnose = false);

  /// \brief Declare the implicit default constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// default constructor will be added.
  ///
  /// \returns The implicitly-declared default constructor.
  CXXConstructorDecl *DeclareImplicitDefaultConstructor(
                                                     CXXRecordDecl *ClassDecl);

  /// DefineImplicitDefaultConstructor - Checks for feasibility of
  /// defining this constructor as the default constructor.
  void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                        CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit destructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// destructor will be added.
  ///
  /// \returns The implicitly-declared destructor.
  CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitDestructor - Checks for feasibility of
  /// defining this destructor as the default destructor.
  void DefineImplicitDestructor(SourceLocation CurrentLocation,
                                CXXDestructorDecl *Destructor);

  /// \brief Build an exception spec for destructors that don't have one.
  ///
  /// C++11 says that user-defined destructors with no exception spec get one
  /// that looks as if the destructor was implicitly declared.
  void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
                                     CXXDestructorDecl *Destructor);

  /// \brief Declare all inheriting constructors for the given class.
  ///
  /// \param ClassDecl The class declaration into which the inheriting
  /// constructors will be added.
  void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);

  /// \brief Define the specified inheriting constructor.
  void DefineInheritingConstructor(SourceLocation UseLoc,
                                   CXXConstructorDecl *Constructor);
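  // Illustrative sketch (editorial; 'ClassDecl' and 'UseLoc' are hypothetical
  // placeholders): implicit special members are declared lazily and only
  // defined when first used, which is why the members above come in
  // Declare*/Define* pairs:
  //
  //   CXXConstructorDecl *Ctor = DeclareImplicitDefaultConstructor(ClassDecl);
  //   ...
  //   DefineImplicitDefaultConstructor(UseLoc, Ctor); // on first use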
  /// \brief Declare the implicit copy constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy constructor will be added.
  ///
  /// \returns The implicitly-declared copy constructor.
  CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitCopyConstructor - Checks for feasibility of
  /// defining this constructor as the copy constructor.
  void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit move constructor for the given class.
  ///
  /// \param ClassDecl The Class declaration into which the implicit
  /// move constructor will be added.
  ///
  /// \returns The implicitly-declared move constructor, or NULL if it wasn't
  /// declared.
  CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitMoveConstructor - Checks for feasibility of
  /// defining this constructor as the move constructor.
  void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit copy assignment operator for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy assignment operator will be added.
  ///
  /// \returns The implicitly-declared copy assignment operator.
  CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

  /// \brief Defines an implicitly-declared copy assignment operator.
  void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// \brief Declare the implicit move assignment operator for the given class.
  ///
  /// \param ClassDecl The Class declaration into which the implicit
  /// move assignment operator will be added.
  ///
  /// \returns The implicitly-declared move assignment operator, or NULL if it
  /// wasn't declared.
  CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

  /// \brief Defines an implicitly-declared move assignment operator.
  void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// \brief Force the declaration of any implicitly-declared members of this
  /// class.
  void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

  /// \brief Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);

  /// \brief Check whether 'this' shows up in the type of a static member
  /// function after the (naturally empty) cv-qualifier-seq would be.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// \brief Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// \brief Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type with
  /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
  /// it simply returns the passed in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                               MultiExprArg ArgsPtr,
                               SourceLocation Loc,
                               SmallVectorImpl<Expr*> &ConvertedArgs,
                               bool AllowExplicit = false,
                               bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                          SourceLocation NameLoc,
                                          IdentifierInfo &Name);

  ParsedType getDestructorName(SourceLocation TildeLoc,
                               IdentifierInfo &II, SourceLocation NameLoc,
                               Scope *S, CXXScopeSpec &SS,
                               ParsedType ObjectType,
                               bool EnteringContext);

  ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                      bool IsDereference, SourceRange Range);

  /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               SourceLocation LAngleBracketLoc,
                               Declarator &D,
                               SourceLocation RAngleBracketLoc,
                               SourceLocation LParenLoc,
                               Expr *E,
                               SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               TypeSourceInfo *Ty,
                               Expr *E,
                               SourceRange AngleBrackets,
                               SourceRange Parens);

  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
  ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
                            SourceLocation LParenLoc, bool isType,
                            void *TyOrExpr,
                            SourceLocation RParenLoc);

  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXUuidof - Parse __uuidof( something ).
  ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
                            SourceLocation LParenLoc, bool isType,
                            void *TyOrExpr,
                            SourceLocation RParenLoc);

  /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
  ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              tok::TokenKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              BinaryOperatorKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                   BinaryOperatorKind Operator);

  //// ActOnCXXThis - Parse 'this' pointer.
  ExprResult ActOnCXXThis(SourceLocation loc);

  /// \brief Try to retrieve the type of the 'this' pointer.
  ///
  /// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
  QualType getCurrentThisType();

  /// \brief When non-NULL, the C++ 'this' expression is allowed despite the
  /// current context not being a non-static member function. In such cases,
  /// this provides the type used for 'this'.
  QualType CXXThisTypeOverride;

  /// \brief RAII object used to temporarily allow the C++ 'this' expression
  /// to be used, with the given qualifiers on the current class type.
  class CXXThisScopeRAII {
    Sema &S;
    QualType OldCXXThisTypeOverride;
    bool Enabled;

  public:
    /// \brief Introduce a new scope where 'this' may be allowed (when enabled),
    /// using the given declaration (which is either a class template or a
    /// class), along with the qualifiers placed on '*this'.
    CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
                     bool Enabled = true);

    ~CXXThisScopeRAII();
  };
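  // Illustrative sketch (editorial; 'SemaRef' and 'RD' are hypothetical
  // placeholders): CXXThisScopeRAII saves and overrides CXXThisTypeOverride
  // for the duration of a scope, so 'this' can be type-checked while
  // processing parts of a class that lie outside any member function body:
  //
  //   {
  //     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD,
  //                                      /*CXXThisTypeQuals=*/0);
  //     QualType ThisTy = SemaRef.getCurrentThisType(); // now usable
  //   } // the previous treatment of 'this' is restored here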
  /// \brief Make sure the value of 'this' is actually available in the current
  /// context, if it is a potentially evaluated context.
  ///
  /// \param Loc The location at which the capture of 'this' occurs.
  ///
  /// \param Explicit Whether 'this' is explicitly captured in a lambda
  /// capture list.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// 'this' that may or may not be used in certain specializations of
  /// a nested generic lambda (depending on whether the name resolves to
  /// a non-static member function or a static function).
  ///
  /// \returns true on failure, false on success.
  bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
      bool BuildAndDiagnose = true,
      const unsigned *const FunctionScopeIndexToStopAt = nullptr);

  /// \brief Determine whether the given type is the type of *this that is used
  /// outside of the body of a member function for a type that is currently
  /// being defined.
  bool isThisOutsideMemberFunctionBody(QualType BaseType);

  /// ActOnCXXBoolLiteral - Parse {true,false} literals.
  ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
  ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
  ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

  //// ActOnCXXThrow - Parse throw expressions.
  ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
  ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                           bool IsThrownVarInScope);
  bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

  /// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
  /// Can be interpreted either as function-style casting ("int(x)")
  /// or class type construction ("ClassType(x,y,z)")
  /// or creation of a value-initialized type ("int()").
  ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Exprs,
                                       SourceLocation RParenLoc);

  ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Exprs,
                                       SourceLocation RParenLoc);
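  // Illustrative sketch (editorial; 'IntTy', 'Exprs' and the locations are
  // hypothetical placeholders): ActOnCXXTypeConstructExpr handles all three
  // spellings listed above; for a function-style cast such as 'int(x)' the
  // parser passes the parsed type and the single parenthesized argument:
  //
  //   ExprResult E = S.ActOnCXXTypeConstructExpr(IntTy, LParenLoc,
  //                                              Exprs, RParenLoc);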
  /// ActOnCXXNew - Parsed a C++ 'new' expression.
  ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                         SourceLocation PlacementLParen,
                         MultiExprArg PlacementArgs,
                         SourceLocation PlacementRParen,
                         SourceRange TypeIdParens, Declarator &D,
                         Expr *Initializer);
  ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                         SourceLocation PlacementLParen,
                         MultiExprArg PlacementArgs,
                         SourceLocation PlacementRParen,
                         SourceRange TypeIdParens,
                         QualType AllocType,
                         TypeSourceInfo *AllocTypeInfo,
                         Expr *ArraySize,
                         SourceRange DirectInitRange,
                         Expr *Initializer,
                         bool TypeMayContainAuto = true);

  bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                          SourceRange R);
  bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                               bool UseGlobal, QualType AllocType, bool IsArray,
                               MultiExprArg PlaceArgs,
                               FunctionDecl *&OperatorNew,
                               FunctionDecl *&OperatorDelete);
  bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
                              DeclarationName Name, MultiExprArg Args,
                              DeclContext *Ctx,
                              bool AllowMissing, FunctionDecl *&Operator,
                              bool Diagnose = true);
  void DeclareGlobalNewDelete();
  void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                       QualType Param1,
                                       QualType Param2 = QualType(),
                                       bool addRestrictAttr = false);

  bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                                DeclarationName Name, FunctionDecl* &Operator,
                                bool Diagnose = true);
  FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                              bool CanProvideSize,
                                              DeclarationName Name);

  /// ActOnCXXDelete - Parsed a C++ 'delete' expression
  ExprResult ActOnCXXDelete(SourceLocation StartLoc,
                            bool UseGlobal, bool ArrayForm,
                            Expr *Operand);

  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc,
                                    bool ConvertToBoolean);

  ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                               Expr *Operand, SourceLocation RParen);
  ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                  SourceLocation RParen);

  /// \brief Parsed one of the type trait support pseudo-functions.
  ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                            ArrayRef<ParsedType> Args,
                            SourceLocation RParenLoc);
  ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                            ArrayRef<TypeSourceInfo *> Args,
                            SourceLocation RParenLoc);

  /// ActOnArrayTypeTrait - Parsed one of the binary type trait support
  /// pseudo-functions.
  ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
                                 SourceLocation KWLoc,
                                 ParsedType LhsTy,
                                 Expr *DimExpr,
                                 SourceLocation RParen);

  ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
                                 SourceLocation KWLoc,
                                 TypeSourceInfo *TSInfo,
                                 Expr *DimExpr,
                                 SourceLocation RParen);

  /// ActOnExpressionTrait - Parsed one of the unary type trait support
  /// pseudo-functions.
  ExprResult ActOnExpressionTrait(ExpressionTrait OET,
                                  SourceLocation KWLoc,
                                  Expr *Queried,
                                  SourceLocation RParen);

  ExprResult BuildExpressionTrait(ExpressionTrait OET,
                                  SourceLocation KWLoc,
                                  Expr *Queried,
                                  SourceLocation RParen);

  ExprResult ActOnStartCXXMemberReference(Scope *S,
                                          Expr *Base,
                                          SourceLocation OpLoc,
                                          tok::TokenKind OpKind,
                                          ParsedType &ObjectType,
                                          bool &MayBePseudoDestructor);

  ExprResult BuildPseudoDestructorExpr(Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       const CXXScopeSpec &SS,
                                       TypeSourceInfo *ScopeType,
                                       SourceLocation CCLoc,
                                       SourceLocation TildeLoc,
                                     PseudoDestructorTypeStorage DestroyedType);

  ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       CXXScopeSpec &SS,
                                       UnqualifiedId &FirstTypeName,
                                       SourceLocation CCLoc,
                                       SourceLocation TildeLoc,
                                       UnqualifiedId &SecondTypeName);

  ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       SourceLocation TildeLoc,
                                       const DeclSpec& DS);

  /// MaybeCreateExprWithCleanups - If the current full-expression
  /// requires any cleanups, surround it with an ExprWithCleanups node.
  /// Otherwise, just returns the passed-in expression.
  Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
  Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
  ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

  ExprResult ActOnFinishFullExpr(Expr *Expr) {
    return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
                                          : SourceLocation());
  }
  ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                                 bool DiscardedValue = false,
                                 bool IsConstexpr = false,
                                 bool IsLambdaInitCaptureInitializer = false);
  StmtResult ActOnFinishFullStmt(Stmt *Stmt);

  // Marks SS invalid if it represents an incomplete type.
  bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);

  DeclContext *computeDeclContext(QualType T);
  DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                  bool EnteringContext = false);
  bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
  CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

  /// \brief The parser has parsed a global nested-name-specifier '::'.
  ///
  /// \param CCLoc The location of the '::'.
  ///
  /// \param SS The nested-name-specifier, which will be updated in-place
  /// to reflect the parsed nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

  /// \brief The parser has parsed a '__super' nested-name-specifier.
  ///
  /// \param SuperLoc The location of the '__super' keyword.
  ///
  /// \param ColonColonLoc The location of the '::'.
  ///
  /// \param SS The nested-name-specifier, which will be updated in-place
  /// to reflect the parsed nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. 
  bool ActOnCXXNestedNameSpecifier(Scope *S,
                                   CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   TemplateTy TemplateName,
                                   SourceLocation TemplateNameLoc,
                                   SourceLocation LAngleLoc,
                                   ASTTemplateArgsPtr TemplateArgs,
                                   SourceLocation RAngleLoc,
                                   SourceLocation CCLoc,
                                   bool EnteringContext);

  /// \brief Given a C++ nested-name-specifier, produce an annotation value
  /// that the parser can use later to reconstruct the given
  /// nested-name-specifier.
  ///
  /// \param SS A nested-name-specifier.
  ///
  /// \returns A pointer containing all of the information in the
  /// nested-name-specifier \p SS.
  void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);

  /// \brief Given an annotation pointer for a nested-name-specifier, restore
  /// the nested-name-specifier structure.
  ///
  /// \param Annotation The annotation pointer, produced by
  /// \c SaveNestedNameSpecifierAnnotation().
  ///
  /// \param AnnotationRange The source range corresponding to the annotation.
  ///
  /// \param SS The nested-name-specifier that will be updated with the contents
  /// of the annotation pointer.
  void RestoreNestedNameSpecifierAnnotation(void *Annotation,
                                            SourceRange AnnotationRange,
                                            CXXScopeSpec &SS);

  bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

  /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
  /// scope or nested-name-specifier) is parsed, part of a declarator-id.
  /// After this method is called, according to [C++ 3.4.3p3], names should be
  /// looked up in the declarator-id's scope, until the declarator is parsed and
  /// ActOnCXXExitDeclaratorScope is called.
  /// The 'SS' should be a non-empty valid CXXScopeSpec.
  bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

  /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
  /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
  /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
  /// Used to indicate that names should revert to being looked up in the
  /// defining scope.
  void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

  /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
  /// initializer for the declaration 'Dcl'.
  /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
  /// static data member of class X, names should be looked up in the scope of
  /// class X.
  void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

  /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
  /// initializer for the declaration 'Dcl'.
  void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

  /// \brief Create a new lambda closure type.
  CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                         TypeSourceInfo *Info,
                                         bool KnownDependent,
                                         LambdaCaptureDefault CaptureDefault);

  /// \brief Start the definition of a lambda expression.
  CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
                                       SourceRange IntroducerRange,
                                       TypeSourceInfo *MethodType,
                                       SourceLocation EndLoc,
                                       ArrayRef<ParmVarDecl *> Params);

  /// \brief Endow the lambda scope info with the relevant properties.
  void buildLambdaScope(sema::LambdaScopeInfo *LSI,
                        CXXMethodDecl *CallOperator,
                        SourceRange IntroducerRange,
                        LambdaCaptureDefault CaptureDefault,
                        SourceLocation CaptureDefaultLoc,
                        bool ExplicitParams,
                        bool ExplicitResultType,
                        bool Mutable);
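  // Illustrative sketch (editorial; all arguments are hypothetical
  // placeholders): the three lambda helpers above are invoked in sequence
  // when a lambda introducer is seen, a sequence normally driven by
  // ActOnStartOfLambdaDefinition (declared below):
  //
  //   CXXRecordDecl *Closure = createLambdaClosureType(
  //       IntroducerRange, MethodTyInfo, KnownDependent, CaptureDefault);
  //   CXXMethodDecl *CallOp = startLambdaDefinition(
  //       Closure, IntroducerRange, MethodTyInfo, EndLoc, Params);
  //   buildLambdaScope(LSI, CallOp, IntroducerRange, CaptureDefault,
  //                    CaptureDefaultLoc, ExplicitParams,
  //                    ExplicitResultType, Mutable);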
  /// \brief Perform initialization analysis of the init-capture and perform
  /// any implicit conversions such as an lvalue-to-rvalue conversion if
  /// not being used to initialize a reference.
  QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
                                                  bool ByRef,
                                                  IdentifierInfo *Id,
                                                  Expr *&Init);

  /// \brief Create a dummy variable within the declcontext of the lambda's
  /// call operator, for name lookup purposes for a lambda init capture.
  ///
  /// CodeGen handles emission of lambda captures, ignoring these dummy
  /// variables appropriately.
  VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                          QualType InitCaptureType,
                                          IdentifierInfo *Id, Expr *Init);

  /// \brief Build the implicit field for an init-capture.
  FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);

  /// \brief Note that we have finished the explicit captures for the
  /// given lambda.
  void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

  /// \brief Introduce the lambda parameters into scope.
  void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);

  /// \brief Deduce a block or lambda's return type based on the return
  /// statements present in the body.
  void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

  /// ActOnStartOfLambdaDefinition - This is called just before we start
  /// parsing the body of a lambda; it analyzes the explicit captures and
  /// arguments, and sets up various data-structures for the body of the
  /// lambda.
  void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
                                    Declarator &ParamInfo, Scope *CurScope);

  /// ActOnLambdaError - If there is an error parsing a lambda, this callback
  /// is invoked to pop the information about the lambda.
  void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
                        bool IsInstantiation = false);

  /// ActOnLambdaExpr - This is called when the body of a lambda expression
  /// was successfully completed.
  ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
                             Scope *CurScope);

  /// \brief Complete a lambda-expression having processed and attached the
  /// lambda body.
  ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
                             sema::LambdaScopeInfo *LSI);

  /// \brief Define the "body" of the conversion from a lambda object to a
  /// function pointer.
  ///
  /// This routine doesn't actually define a sensible body; rather, it fills
  /// in the initialization expression needed to copy the lambda object into
  /// the block, and IR generation actually generates the real body of the
  /// block pointer conversion.
  void DefineImplicitLambdaToFunctionPointerConversion(
         SourceLocation CurrentLoc, CXXConversionDecl *Conv);

  /// \brief Define the "body" of the conversion from a lambda object to a
  /// block pointer.
  ///
  /// This routine doesn't actually define a sensible body; rather, it fills
  /// in the initialization expression needed to copy the lambda object into
  /// the block, and IR generation actually generates the real body of the
  /// block pointer conversion.
  void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                    CXXConversionDecl *Conv);

  ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                           SourceLocation ConvLocation,
                                           CXXConversionDecl *Conv,
                                           Expr *Src);

  // ParseObjCStringLiteral - Parse Objective-C string literals.
  ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
                                    Expr **Strings,
                                    unsigned NumStrings);

  ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

  /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
  /// numeric literal expression. Type of the expression will be "NSNumber *"
  /// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const 
DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXMemberDefaultArgs(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
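// Illustrative sketch (hypothetical usage, not part of the original header):
// the CXXBasePaths overload of IsDerivedFrom records every inheritance path,
// so callers can reject ambiguous bases before forming a derived-to-base
// conversion.
// \code
//   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
//                      /*DetectVirtual=*/false);
//   if (IsDerivedFrom(DerivedTy, BaseTy, Paths) &&
//       !Paths.isAmbiguous(Context.getCanonicalType(BaseTy)))
//     ; // exactly one path: the conversion is unambiguous
// \endcode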
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);

bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  SourceLocation Loc, SourceRange Range,
                                  CXXCastPath *BasePath = nullptr,
                                  bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  unsigned InaccessibleBaseID,
                                  unsigned AmbiguousBaseConvID,
                                  SourceLocation Loc, SourceRange Range,
                                  DeclarationName Name,
                                  CXXCastPath *BasePath);

std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);

bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of the base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
                                          const CXXMethodDecl *Old);

bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);

/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);

/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);

/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

enum AccessResult {
  AR_accessible,
  AR_inaccessible,
  AR_dependent,
  AR_delayed
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    const InitializedEntity &Entity,
                                    AccessSpecifier Access,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    const InitializedEntity &Entity,
                                    AccessSpecifier Access,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path, unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
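///
/// For example (illustrative sketch; the call shape is an assumption): a
/// redeclared class template checks its new parameter list against the
/// previous one in the \c TPC_ClassTemplate context:
/// \code
///   if (CheckTemplateParameterList(NewParams, OldParams, TPC_ClassTemplate))
///     Invalid = true; // e.g., ill-placed default template arguments
/// \endcode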
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg 
TemplateParameterLists, Declarator &D);

bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
                                        TemplateSpecializationKind NewTSK,
                                        NamedDecl *PrevDecl,
                                        TemplateSpecializationKind PrevTSK,
                                        SourceLocation PrevPtOfInstantiation,
                                        bool &SuppressNew);

bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
                  const TemplateArgumentListInfo &ExplicitTemplateArgs,
                                                  LookupResult &Previous);

bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
                         TemplateArgumentListInfo *ExplicitTemplateArgs,
                                         LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);

DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      unsigned TagSpec,
                                      SourceLocation KWLoc,
                                      const CXXScopeSpec &SS,
                                      TemplateTy Template,
                                      SourceLocation TemplateNameLoc,
                                      SourceLocation LAngleLoc,
                                      ASTTemplateArgsPtr TemplateArgs,
                                      SourceLocation RAngleLoc,
                                      AttributeList *Attr);

DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      unsigned TagSpec,
                                      SourceLocation KWLoc,
                                      CXXScopeSpec &SS,
                                      IdentifierInfo *Name,
                                      SourceLocation NameLoc,
                                      AttributeList *Attr);

DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      Declarator &D);

TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                        SourceLocation TemplateLoc,
                                        SourceLocation RAngleLoc,
                                        Decl *Param,
                                        SmallVectorImpl<TemplateArgument>
                                          &Converted,
                                        bool &HasDefaultArg);

/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
  /// \brief The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// \brief The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// \brief The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param,
                           TemplateArgumentLoc &Arg,
                           NamedDecl *Template,
                           SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
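///
/// A minimal call sketch (illustrative; the surrounding names are
/// hypothetical):
/// \code
///   TemplateArgumentListInfo ArgsInfo(LAngleLoc, RAngleLoc);
///   // ... populate ArgsInfo from the parsed template-id ...
///   SmallVector<TemplateArgument, 4> Converted;
///   if (CheckTemplateArgumentList(Template, TemplateLoc, ArgsInfo,
///                                 /*PartialTemplateArgs=*/false, Converted))
///     return QualType(); // invalid arguments; errors already diagnosed
/// \endcode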
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                  const CXXScopeSpec &SS,
                  SourceLocation TemplateLoc,
                  TemplateTy TemplateName,
                  SourceLocation TemplateNameLoc,
                  SourceLocation LAngleLoc,
                  ASTTemplateArgsPtr TemplateArgs,
                  SourceLocation RAngleLoc);

QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                           SourceLocation KeywordLoc,
                           NestedNameSpecifierLoc QualifierLoc,
                           const IdentifierInfo &II,
                           SourceLocation IILoc);

TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                  SourceLocation Loc,
                                                  DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);

ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
                                          TemplateParameterList *Params);

std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgument *Args,
                                unsigned NumArgs);

//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//

/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// \brief An arbitrary expression.
  UPPC_Expression = 0,

  /// \brief The base type of a class type.
  UPPC_BaseType,

  /// \brief The type of an arbitrary declaration.
  UPPC_DeclarationType,

  /// \brief The type of a data member.
  UPPC_DataMemberType,

  /// \brief The size of a bit-field.
  UPPC_BitFieldWidth,

  /// \brief The expression in a static assertion.
  UPPC_StaticAssertExpression,

  /// \brief The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,

  /// \brief The enumerator value.
  UPPC_EnumeratorValue,

  /// \brief A using declaration.
  UPPC_UsingDeclaration,

  /// \brief A friend declaration.
  UPPC_FriendDeclaration,

  /// \brief A declaration qualifier.
  UPPC_DeclarationQualifier,

  /// \brief An initializer.
  UPPC_Initializer,

  /// \brief A default argument.
  UPPC_DefaultArgument,

  /// \brief The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,

  /// \brief The type of an exception.
  UPPC_ExceptionType,

  /// \brief Partial specialization.
  UPPC_PartialSpecialization,

  /// \brief Microsoft __if_exists.
  UPPC_IfExists,

  /// \brief Microsoft __if_not_exists.
  UPPC_IfNotExists,

  /// \brief Lambda expression.
  UPPC_Lambda,

  /// \brief Block expression.
  UPPC_Block
};

/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                      UnexpandedParameterPackContext UPPC,
                                ArrayRef<UnexpandedParameterPack> Unexpanded);

/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
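///
/// For instance (illustrative; the surrounding names are assumed): when
/// deducing for a call \c f(x) against \c template<typename T> void f(const T&),
/// deduction records the original parameter type, the argument's index, and
/// the argument's original type so the deduced specialization can later be
/// re-checked against them:
/// \code
///   OriginalCallArgs.push_back(OriginalCallArg(ParamType, ArgIdx, ArgType));
/// \endcode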
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
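///
/// Usage sketch (illustrative; assumes a TypeSourceInfo spelled with 'auto'
/// and an initializer expression):
/// \code
///   QualType DeducedType;
///   if (DeduceAutoType(AutoTSI, InitExpr, DeducedType) != DAR_Succeeded)
///     return true; // deduction failed; the caller diagnoses
/// \endcode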
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. 
DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup. 
A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;

/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;

/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
    : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;

/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
  SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// \brief Note that we are instantiating a class template,
  /// function template, or a member thereof.
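  ///
  /// Typical RAII usage (illustrative sketch):
  /// \code
  ///   InstantiatingTemplate Inst(*this, PointOfInstantiation, ClassDecl);
  ///   if (Inst.isInvalid())
  ///     return true; // depth limit exceeded; error already emitted
  ///   // ... instantiate; the record is popped when Inst leaves scope
  /// \endcode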
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. 
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(), sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. 
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
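/// A minimal usage sketch (hypothetical caller; 'Args', 'NumArgs', and
/// 'SubstArgs' are illustrative names):
/// \code
/// SmallVector<Expr *, 8> SubstArgs;
/// if (SubstExprs(Args, NumArgs, /*IsCall=*/true, TemplateArgs, SubstArgs))
///   return ExprError();
/// \endcode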
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation(
    SourceLocation AtClassImplLoc,
    IdentifierInfo *ClassName, SourceLocation ClassLoc,
    IdentifierInfo *SuperClassname,
    SourceLocation SuperClassLoc);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc,
    IdentifierInfo **IdentList,
    SourceLocation *IdentLocs,
    ArrayRef<ObjCTypeParamList *> TypeParamLists,
    unsigned NumElts);

DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                               const IdentifierLocPair *IdentList,
                                               unsigned NumElts,
                                               AttributeList *attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             const IdentifierLocPair *ProtocolId,
                             unsigned NumProtocols,
                             SmallVectorImpl<Decl *> &Protocols);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
       Scope *S,
       ParsedType baseType,
       SourceLocation lAngleLoc,
       ArrayRef<IdentifierInfo *> identifiers,
       ArrayRef<SourceLocation> identifierLocs,
       SourceLocation rAngleLoc,
       SourceLocation &typeArgsLAngleLoc,
       SmallVectorImpl<ParsedType> &typeArgs,
       SourceLocation &typeArgsRAngleLoc,
       SourceLocation &protocolLAngleLoc,
       SmallVectorImpl<Decl *> &protocols,
       SourceLocation &protocolRAngleLoc,
       bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
             SourceLocation lAngleLoc,
             ArrayRef<Decl *> protocols,
             ArrayRef<SourceLocation> protocolLocs,
             SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
             Scope *S,
             SourceLocation Loc,
             ParsedType BaseType,
             SourceLocation TypeArgsLAngleLoc,
             ArrayRef<ParsedType> TypeArgs,
             SourceLocation TypeArgsRAngleLoc,
             SourceLocation ProtocolLAngleLoc,
             ArrayRef<Decl *> Protocols,
             ArrayRef<SourceLocation> ProtocolLocs,
             SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
                             SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                 SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
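/// For example (illustrative), given the property:
/// \code
/// @property (copy) NSString *name;
/// \endcode
/// this declares the getter '-name' and the setter '-setName:' on the
/// container unless the class has already declared them.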
void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
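/// For example (illustrative), in '[NSString string]' the identifier
/// 'NSString' names a class, so this is a class message.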
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
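/// For example (illustrative):
/// \code
/// #pragma options align=mac68k
/// \endcode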
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
                     Expr *Alignment, SourceLocation PragmaLoc,
                     SourceLocation LParenLoc, SourceLocation RParenLoc);

/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName, llvm::StringRef PragmaName);

/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
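/// For example (illustrative), '#pragma redefine_extname oldname newname'
/// makes a subsequent declaration of 'oldname' use the external symbol
/// 'newname'.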
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
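/// A CUDA example of the attribute this models (illustrative):
/// \code
/// __global__ void __launch_bounds__(256, 4) kernel(float *p);
/// \endcode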
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
                                                 OpenMPClauseKind CKind);
public:
/// \brief Checks if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);

/// \brief Check if the specified variable is used in a private clause in
/// OpenMP constructs.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                  Expr *Op);

/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                         const DeclarationNameInfo &DirName, Scope *CurScope,
                         SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
                                   CXXScopeSpec &ScopeSpec,
                                   const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                   SourceLocation Loc,
                                   ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
                                   SourceLocation Loc,
                                   ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
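/// For example (illustrative):
/// \code
/// #pragma omp for
/// for (int i = 0; i < n; ++i)
///   a[i] = b[i];
/// \endcode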
StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. 
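/// For example (illustrative): '#pragma omp flush(a, b)'.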
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. 
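/// For example (illustrative): '#pragma omp for schedule(dynamic, 4)'.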
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, SourceLocation DepLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. 
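/// For example (illustrative): '#pragma omp parallel copyin(x)', where 'x'
/// is a threadprivate variable.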
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                        SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                        SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc);

/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
  /// \brief An implicit conversion.
  CCK_ImplicitConversion,
  /// \brief A C-style cast.
  CCK_CStyleCast,
  /// \brief A functional-style cast.
  CCK_FunctionalCast,
  /// \brief A cast other than a C-style cast.
  CCK_OtherCast
};

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collect argument expressions for various
/// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind);

// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
                                                   ExprResult &RHS,
                                                   bool Diagnose = true,
                                                   bool DiagnoseCFAudited = false);

// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
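// For example (illustrative):
// \code
// typedef union {
//   int *ip;
//   float *fp;
// } ptr_union __attribute__((transparent_union));
// \endcode
// An 'int *' argument can then initialize a parameter of type 'ptr_union'.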
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
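// For example (illustrative): for 'x = y' the converted type is null, while
// for 'x += y' it is (roughly) the common type to which both operands are
// converted before the operation.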
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). 
  Ref_Compatible
};

ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                    QualType T1, QualType T2,
                                                    bool &DerivedToBase,
                                                    bool &ObjCConversion,
                                                    bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged };

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
                                           QualType castType, Expr *&op,
                                           CheckedConversionKind CCK,
                                           bool DiagnoseCFAudited = false,
                                           BinaryOperatorKind Opc = BO_PtrMemD);

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
                               MultiExprArg Args, Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage,
                               SourceLocation lbrac, SourceLocation rbrac,
                               SourceRange RecRange,
                               QualType &ReturnType, ExprValueKind &VK);

/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
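/// For example (illustrative), '[[NSMutableArray alloc] init]' has type
/// 'NSMutableArray *' rather than 'id', because +alloc and -init have
/// related result types.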
QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. 
/// Can optionally return whether the bit-field is of width 0. ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D); bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation. PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); // HLSL Change Starts - checking array subscript access to vector or matrix member void CheckHLSLArrayAccess(const Expr *expr); // HLSL Change ends void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinCpuSupports(CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// \brief Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// \brief Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; // HLSL Change Starts bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter); bool DiagnoseHLSLLookup(const LookupResult &R); void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl); // HLSL Change Ends /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability specifier. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// \brief Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ?
OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// \brief To be used for checking whether the arguments being passed to /// a function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view it as an extra argument. return NumArgs > NumParams; } }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } ~EnterExpressionEvaluationContext() { Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// \brief Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// \brief The template function declaration to be late parsed. Decl *D; }; } // end namespace clang #endif
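/* A minimal usage sketch for the EnterExpressionEvaluationContext RAII helper
 * above (illustrative only, not part of the original header; `S` is assumed
 * to be any live Sema instance): */
#if 0
void analyzeUnevaluatedOperand(Sema &S, Expr *Operand) {
  /* Push an unevaluated context; the destructor calls
     PopExpressionEvaluationContext() automatically, even on early return. */
  EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
  /* ... type-check Operand without odr-using anything it names ... */
}
#endif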
mlp_mnist_bf16_amx_fused_trans_fused_sgd_numa.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Evangelos Georganas, Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <libxsmm_sync.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif /* include c-based dnn library */ #include "../common/dnn_common.h" #include "../common/mnist.h" #define TEST_ACCURACY #define OVERWRITE_DOUTPUT_BWDUPD /*#define FUSE_WT_TRANS_SGD*/ /*#define FUSE_ACT_TRANS_FWD*/ /*#define FUSE_DACT_TRANS_BWD*/ #define PRIVATE_WT_TRANS #define PRIVATE_ACT_TRANS #define PRIVATE_DACT_TRANS #define FUSE_SGD_IN_BWD #define _mm512_load_fil(A) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepi16_epi32(_mm256_loadu_si256((__m256i*)(A))),16)) #define _mm512_store_fil(A,B) _mm256_storeu_si256((__m256i*)(A), (__m256i)LIBXSMM_INTRINSICS_MM512_CVT_FP32_BF16((B))) static int threads_per_numa = 0; LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne) { int i; zero_buf(buf, size); for (i = 0; i < (int)size; ++i) { buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0))); } } LIBXSMM_INLINE void my_init_buf_bf16(libxsmm_bfloat16* buf, size_t size, int initPos, int initOne) { int i; zero_buf_bf16(buf, size); for (i = 0; i < (int)size; ++i) { libxsmm_bfloat16_hp tmp; tmp.f = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0))); buf[i] = tmp.i[1]; } } LIBXSMM_INLINE void init_buf_bf16_numa_aware(int threads, int ltid, int ft_mode, libxsmm_bfloat16* buf, size_t size, int initPos, int initOne) { int chunksize, chunks; int my_numa_node = ltid/threads_per_numa; int n_numa_nodes = threads/threads_per_numa; int l = 0; if (ft_mode == 0) { /* Mode 0: Block-cyclic assignment to NUMA nodes */ int bufsize = size * 2; chunksize = 4096; chunks = (bufsize + chunksize - 1)/chunksize; for (l = 0; l < chunks; l++) { int _chunksize = (l < chunks - 1) ?
chunksize : size - (chunks-1) * chunksize; if ( l == my_numa_node) { my_init_buf_bf16((libxsmm_bfloat16*) buf+l*chunksize, _chunksize, 0, 0 ); } } } } void init_buffer_block_numa(libxsmm_bfloat16* buf, size_t size) { int nThreads = omp_get_max_threads(); #if defined(_OPENMP) # pragma omp parallel #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif if (tid % threads_per_numa == 0) { init_buf_bf16_numa_aware(nThreads, tid, 1, buf, size, 0, 0); } } } void init_buffer_block_cyclic_numa(libxsmm_bfloat16* buf, size_t size) { int nThreads = omp_get_max_threads(); #if defined(_OPENMP) # pragma omp parallel #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif if (tid % threads_per_numa == 0) { init_buf_bf16_numa_aware(nThreads, tid, 0, buf, size, 0, 0); } } } #if 0 LIBXSMM_INLINE void my_matrix_copy_KCCK_to_KCCK_vnni(float *src, float *dst, int C, int K, int bc, int bk) { int k1, k2, c1, c2; int kBlocks = K/bk; int cBlocks = C/bc; LIBXSMM_VLA_DECL(4, float, real_src, src, cBlocks, bc, bk); LIBXSMM_VLA_DECL(5, float, real_dst, dst, cBlocks, bc/2, bk, 2); for (k1 = 0; k1 < kBlocks; k1++) { for (c1 = 0; c1 < cBlocks; c1++) { for (c2 = 0; c2 < bc; c2++) { for (k2 = 0; k2 < bk; k2++) { LIBXSMM_VLA_ACCESS(5, real_dst, k1, c1, c2/2, k2, c2%2, cBlocks, bc/2, bk, 2) = LIBXSMM_VLA_ACCESS(4, real_src, k1, c1, c2, k2, cBlocks, bc, bk); } } } } } #endif typedef enum my_eltwise_fuse { MY_ELTWISE_FUSE_NONE = 0, MY_ELTWISE_FUSE_BIAS = 1, MY_ELTWISE_FUSE_RELU = 2, MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU } my_eltwise_fuse; typedef enum my_pass { MY_PASS_FWD = 1, MY_PASS_BWD_D = 2, MY_PASS_BWD_W = 4, MY_PASS_BWD = 6 } my_pass; typedef struct my_opt_config { libxsmm_blasint C; libxsmm_blasint K; libxsmm_blasint bc; libxsmm_blasint bk; libxsmm_blasint threads; libxsmm_blasint opt_2d_blocking; libxsmm_blasint opt_col_teams; libxsmm_blasint opt_row_teams; float lr; size_t scratch_size; libxsmm_barrier* barrier; } my_opt_config; typedef struct my_smax_fwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint bn; libxsmm_blasint bc; libxsmm_blasint threads; size_t scratch_size; libxsmm_barrier* barrier; } my_smax_fwd_config; typedef struct my_smax_bwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint bn; libxsmm_blasint bc; libxsmm_blasint threads; size_t scratch_size; float loss_weight; libxsmm_barrier* barrier; my_eltwise_fuse fuse_type; } my_smax_bwd_config; typedef struct my_vnni_reformat_config { libxsmm_blasint C; libxsmm_blasint N; libxsmm_blasint bc; libxsmm_blasint bn; libxsmm_blasint threads; libxsmm_barrier* barrier; my_eltwise_fuse fuse_type; libxsmm_meltwfunction_unary norm_to_vnni_kernel; libxsmm_meltwfunction_unary fused_relu_kernel; } my_vnni_reformat_config; typedef struct my_fc_fwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint K; libxsmm_blasint bn; libxsmm_blasint bc; libxsmm_blasint bk; libxsmm_blasint threads; my_eltwise_fuse fuse_type; libxsmm_blasint fwd_bf; libxsmm_blasint fwd_2d_blocking; libxsmm_blasint fwd_col_teams; libxsmm_blasint fwd_row_teams; libxsmm_blasint fwd_M_hyperpartitions; libxsmm_blasint fwd_N_hyperpartitions; size_t scratch_size; libxsmm_barrier* barrier; libxsmm_bsmmfunction fwd_config_kernel; libxsmm_bsmmfunction tilerelease_kernel; libxsmm_bsmmfunction_reducebatch_strd gemm_fwd; libxsmm_bsmmfunction_reducebatch_strd gemm_fwd2; libxsmm_bmmfunction_reducebatch_strd gemm_fwd3; 
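/* Note: gemm_fwd4..gemm_fwd8 below are BRGEMM variants with a fused
   element-wise epilogue (column bias and/or ReLU/sigmoid activation);
   setup_my_fc_fwd() JITs one handle per fusion-flag combination and
   my_fc_fwd_exec() picks the matching handle based on cfg.fuse_type. */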
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd4; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd5; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd6; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd7; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd8; libxsmm_meltwfunction_unary fwd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary fwd_cvtfp32bf16_relu_kernel; libxsmm_meltwfunction_unary fwd_sigmoid_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary fwd_zero_kernel; libxsmm_meltwfunction_unary fwd_copy_bf16fp32_kernel; libxsmm_meltwfunction_unary fwd_colbcast_bf16fp32_copy_kernel; } my_fc_fwd_config; typedef struct my_fc_bwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint K; libxsmm_blasint bn; libxsmm_blasint bc; libxsmm_blasint bk; libxsmm_blasint threads; my_eltwise_fuse fuse_type; libxsmm_blasint bwd_bf; libxsmm_blasint bwd_2d_blocking; libxsmm_blasint bwd_col_teams; libxsmm_blasint bwd_row_teams; libxsmm_blasint bwd_M_hyperpartitions; libxsmm_blasint bwd_N_hyperpartitions; libxsmm_blasint upd_bf; libxsmm_blasint upd_2d_blocking; libxsmm_blasint upd_col_teams; libxsmm_blasint upd_row_teams; libxsmm_blasint upd_M_hyperpartitions; libxsmm_blasint upd_N_hyperpartitions; libxsmm_blasint ifm_subtasks; libxsmm_blasint ofm_subtasks; libxsmm_blasint fuse_relu_bwd; size_t bwd_private_tr_wt_scratch_mark; size_t upd_private_tr_act_scratch_mark; size_t upd_private_tr_dact_scratch_mark; size_t scratch_size; size_t doutput_scratch_mark; libxsmm_barrier* barrier; libxsmm_bsmmfunction bwd_config_kernel; libxsmm_bsmmfunction upd_config_kernel; libxsmm_bsmmfunction tilerelease_kernel; libxsmm_bsmmfunction_reducebatch_strd gemm_bwd; libxsmm_bsmmfunction_reducebatch_strd gemm_bwd2; libxsmm_bmmfunction_reducebatch_strd gemm_bwd3; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_bwd5; libxsmm_meltwfunction_unary bwd_fused_relu_kernel; libxsmm_bsmmfunction_reducebatch_strd gemm_upd; libxsmm_bsmmfunction_reducebatch_strd gemm_upd2; libxsmm_bmmfunction_reducebatch_strd gemm_upd3; libxsmm_meltwfunction_unary bwd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary upd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary bwd_relu_kernel; libxsmm_meltwfunction_unary bwd_zero_kernel; libxsmm_meltwfunction_unary upd_zero_kernel; libxsmm_meltwfunction_unary delbias_reduce_kernel; libxsmm_meltwfunction_unary vnni_to_vnniT_kernel; libxsmm_meltwfunction_unary norm_to_normT_kernel; libxsmm_meltwfunction_unary norm_to_vnni_kernel; libxsmm_meltwfunction_unary norm_to_vnni_kernel_wt; float lr; } my_fc_bwd_config; my_vnni_reformat_config setup_my_vnni_reformat(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_vnni_reformat_config res; libxsmm_blasint ld = bc; res.N = N; res.C = C; res.bn = bn; res.bc = bc; res.threads = threads; res.fuse_type = fuse_type; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); res.fused_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ld, &ld, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( res.fused_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP fused_relu_kernel failed. 
Bailing...!\n"); exit(-1); } return res; } my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_fc_fwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; libxsmm_blasint ld_zero = bk*bn; libxsmm_blasint ld_upconvert = K; float alpha = 1.0f; float beta = 1.0f; float zerobeta = 0.0f; libxsmm_meltw_flags fusion_flags; int l_flags, l_tc_flags; int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); libxsmm_blasint unroll_hint; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; /* setup parallelization strategy */ res.fwd_M_hyperpartitions = 1; res.fwd_N_hyperpartitions = 1; if (threads == 16) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 2; res.fwd_row_teams = 8; } else if (threads == 14) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 2; res.fwd_row_teams = 7; } else if (threads == 56) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 1; res.fwd_row_teams = 14; res.fwd_M_hyperpartitions = 1; res.fwd_N_hyperpartitions = 4; } else { res.fwd_bf = 1; res.fwd_2d_blocking = 0; res.fwd_col_teams = 1; res.fwd_row_teams = 1; } #if 0 res.fwd_bf = atoi(getenv("FWD_BF")); res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING")); res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS")); res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS")); #endif /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); unroll_hint = (res.C/res.bc)/res.fwd_bf; res.fwd_config_kernel = libxsmm_bsmmdispatch(res.bk, res.bn, res.bc, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL); if ( res.fwd_config_kernel == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP fwd_config_kernel failed. Bailing...!\n"); exit(-1); } res.gemm_fwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL); if ( res.gemm_fwd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n"); exit(-1); } res.gemm_fwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_fwd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n"); exit(-1); } res.gemm_fwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_fwd3 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd3 failed. 
Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_OVERWRITE_C; res.gemm_fwd4 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd4 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd4 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_ACT_RELU_OVERWRITE_C; res.gemm_fwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd5 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd5 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_ACT_SIGM_OVERWRITE_C; res.gemm_fwd6 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd6 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd6 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_RELU_OVERWRITE_C; res.gemm_fwd7 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd7 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd7 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_SIGM_OVERWRITE_C; res.gemm_fwd8 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd8 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd8 failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... */ res.fwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.fwd_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_cvtfp32bf16_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT, LIBXSMM_MELTW_TYPE_UNARY_RELU); if ( res.fwd_cvtfp32bf16_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_relu_kernel failed. 
Bailing...!\n"); exit(-1); } res.fwd_sigmoid_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_SIGMOID); if ( res.fwd_sigmoid_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_sigmoid_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL); if ( res.tilerelease_kernel == NULL ) { fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bk, 1, &ld_zero, &ld_zero, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( res.fwd_zero_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_zero_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_colbcast_bf16fp32_copy_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY ); if ( res.fwd_colbcast_bf16fp32_copy_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_colbcast_bf16fp32_copy_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_copy_bf16fp32_kernel = libxsmm_dispatch_meltw_unary(K, 1, &ld_upconvert, &ld_upconvert, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.fwd_copy_bf16fp32_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_copy_bf16fp32_kernel failed. Bailing...!\n"); exit(-1); } /* init scratch */ res.scratch_size = sizeof(float) * LIBXSMM_MAX(res.K * res.N, res.threads * LIBXSMM_MAX(res.bk * res.bn, res.K)); return res; } my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type, float lr) { my_fc_bwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; libxsmm_blasint ld_zero_bwd = bc*bn; libxsmm_blasint ld_zero_upd = bk; libxsmm_blasint delbias_K = K; libxsmm_blasint delbias_N = N; float alpha = 1.0f; float beta = 1.0f; float zerobeta = 0.0f; libxsmm_blasint updM; libxsmm_blasint updN; int l_flags, l_tc_flags; int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); libxsmm_blasint unroll_hint; size_t size_bwd_scratch; size_t size_upd_scratch; libxsmm_blasint bbk; libxsmm_blasint bbc; libxsmm_blasint ldaT = bc; libxsmm_blasint ldb_orig= bc; libxsmm_meltw_flags fusion_flags_bwd; libxsmm_meltw_operation bwd_fused_op; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; res.fuse_relu_bwd = 0; res.lr = lr; /* setup parallelization strategy */ res.bwd_M_hyperpartitions = 1; res.upd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 1; res.upd_N_hyperpartitions = 1; if (threads == 16) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 8; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 8; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 14) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 7; res.upd_bf = 1; 
res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 7; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 56) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 1; res.bwd_row_teams = 14; res.bwd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 4; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 1; res.upd_row_teams = 14; res.upd_M_hyperpartitions = 1; res.upd_N_hyperpartitions = 4; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else { res.bwd_bf = 1; res.bwd_2d_blocking = 0; res.bwd_col_teams = 1; res.bwd_row_teams = 1; res.upd_bf = 1; res.upd_2d_blocking = 0; res.upd_col_teams = 1; res.upd_row_teams = 1; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } bbk = (res.upd_2d_blocking == 1) ? bk : bk/res.ofm_subtasks; bbc = (res.upd_2d_blocking == 1) ? bc : bc/res.ifm_subtasks; #if 0 res.bwd_bf = atoi(getenv("BWD_BF")); res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING")); res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS")); res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS")); res.upd_bf = atoi(getenv("UPD_BF")); res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING")); res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS")); res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS")); res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS")); res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS")); #endif if (res.bwd_2d_blocking != 1) { printf("Requested private wt transposes, but for the current # of threads the bwd decomposition is not 2D. Will perform upfront/shared wt transposes...\n"); } if (res.upd_2d_blocking != 1) { printf("Requested private act transposes, but for the current # of threads the upd decomposition is not 2D. Will perform upfront/shared act transposes...\n"); } if (res.upd_2d_blocking != 1) { printf("Requested private dact transposes, but for the current # of threads the upd decomposition is not 2D. Will perform upfront/shared dact transposes...\n"); } /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ /* BWD GEMM */ l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); unroll_hint = (res.K/res.bk)/res.bwd_bf; res.gemm_bwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &beta, &l_flags, NULL); if ( res.gemm_bwd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n"); exit(-1); } res.gemm_bwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_bwd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n"); exit(-1); } res.gemm_bwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_bwd3 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd3 failed. 
Bailing...!\n"); exit(-1); } res.bwd_config_kernel = libxsmm_bsmmdispatch(res.bc, res.bn, res.bk, &ldb, &lda, &ldb, NULL, &beta, &l_tc_flags, NULL); if ( res.bwd_config_kernel == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP bwd_config_kernel failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... */ res.bwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.bwd_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.bwd_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( res.bwd_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_relu_kernel failed. Bailing...!\n"); exit(-1); } res.bwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bc, 1, &ld_zero_bwd, &ld_zero_bwd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( res.bwd_zero_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_zero_kernel failed. Bailing...!\n"); exit(-1); } /* JITing the transpose kernel */ res.vnni_to_vnniT_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &lda, &ldaT, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_VNNI_TO_VNNIT); if ( res.vnni_to_vnniT_kernel == NULL ) { fprintf( stderr, "JIT for TPP vnni_to_vnniT_kernel failed. Bailing...!\n"); exit(-1); } bwd_fused_op = LIBXSMM_MELTW_OPERATION_COLBIAS_ACT; fusion_flags_bwd = LIBXSMM_MELTW_FLAG_ACT_RELU_BWD_OVERWRITE_C; res.gemm_bwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL, bwd_fused_op, LIBXSMM_DATATYPE_BF16, fusion_flags_bwd, 0, 0, 0, 0); if ( res.gemm_bwd5 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd5 failed. Bailing...!\n"); exit(-1); } if (((fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) && (res.upd_2d_blocking == 1)) { res.fuse_relu_bwd = 1; } res.bwd_fused_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( res.bwd_fused_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_fused_relu_kernel failed. Bailing...!\n"); exit(-1); } /* UPD GEMM */ lda = res.bk; ldb = res.bn; ldc = res.bk; updM = res.bk/res.ofm_subtasks; updN = res.bc/res.ifm_subtasks; l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); unroll_hint = (res.N/res.bn)/res.upd_bf; res.gemm_upd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL); if ( res.gemm_upd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. 
Bailing...!\n"); exit(-1); } res.gemm_upd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_upd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n"); exit(-1); } l_flags = l_flags | LIBXSMM_GEMM_FLAG_VNNI_C; res.gemm_upd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_upd3 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd3 failed. Bailing...!\n"); exit(-1); } res.upd_config_kernel = libxsmm_bsmmdispatch(updM, updN, res.bn, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL); if ( res.upd_config_kernel == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP upd_config_kernel failed. Bailing...!\n"); exit(-1); } res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL); if ( res.tilerelease_kernel == NULL ) { fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... */ res.upd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.upd_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP upd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.upd_zero_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ld_zero_upd, &ld_zero_upd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( res.upd_zero_kernel == NULL ) { fprintf( stderr, "JIT for TPP upd_zero_kernel failed. Bailing...!\n"); exit(-1); } res.delbias_reduce_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &delbias_K, &delbias_N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT); if ( res.delbias_reduce_kernel == NULL ) { fprintf( stderr, "JIT for TPP delbias_reduce_kernel failed. Bailing...!\n"); exit(-1); } /* JITing the transpose kernels */ res.norm_to_vnni_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI); if ( res.norm_to_vnni_kernel == NULL ) { fprintf( stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n"); exit(-1); } res.norm_to_vnni_kernel_wt = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI); if ( res.norm_to_vnni_kernel_wt == NULL ) { fprintf( stderr, "JIT for TPP norm_to_vnni_kernel_wt failed. Bailing...!\n"); exit(-1); } res.norm_to_normT_kernel = libxsmm_dispatch_meltw_unary(bc, bn, &ldb, &ldb_orig, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT); if ( res.norm_to_normT_kernel == NULL ) { fprintf( stderr, "JIT for TPP norm_to_normT_kernel failed. 
Bailing...!\n"); exit(-1); } /* init scratch */ size_bwd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.N, res.threads * res.bc * res.bn) + sizeof(libxsmm_bfloat16) * res.C * res.K; res.bwd_private_tr_wt_scratch_mark = size_bwd_scratch; size_bwd_scratch += res.threads * res.bc * res.K * sizeof(libxsmm_bfloat16); size_upd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.K, res.threads * res.bc * res.bk) + sizeof(libxsmm_bfloat16) * res.threads * res.bk * res.bc + sizeof(libxsmm_bfloat16) * (res.N * (res.C + res.K)); res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + sizeof(libxsmm_bfloat16) * res.N * res.K; res.doutput_scratch_mark = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch); res.upd_private_tr_dact_scratch_mark = res.scratch_size; res.scratch_size += res.threads * res.bk * res.N * sizeof(libxsmm_bfloat16); res.upd_private_tr_act_scratch_mark = res.scratch_size; res.scratch_size += res.threads * res.bc * res.N * (((res.C/res.bc)+res.upd_col_teams-1)/res.upd_col_teams) * sizeof(libxsmm_bfloat16); return res; } my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, float lr) { my_opt_config res; /* setting up some handle values */ res.C = C; res.K = K; res.bc = bc; res.bk = bk; res.threads = threads; if (threads == 16) { res.opt_2d_blocking = 1; res.opt_col_teams = 2; res.opt_row_teams = 8; } else if (threads == 14) { res.opt_2d_blocking = 1; res.opt_col_teams = 2; res.opt_row_teams = 7; } else { res.opt_2d_blocking = 0; res.opt_col_teams = 1; res.opt_row_teams = 1; } res.lr = lr; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = 0; return res; } my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads) { my_smax_fwd_config res; /* setting up some handle values */ res.C = C; res.N = N; res.bc = bc; res.bn = bn; res.threads = threads; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = (sizeof(float)*res.C*res.N*2); return res; } my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, float loss_weight) { my_smax_bwd_config res; /* setting up some handle values */ res.C = C; res.N = N; res.bc = bc; res.bn = bn; res.threads = threads; res.loss_weight = loss_weight; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = (sizeof(float)*res.C*res.N*2); return res; } void my_fc_fwd_exec( my_fc_fwd_config cfg, const libxsmm_bfloat16* wt_ptr, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr, const libxsmm_bfloat16* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch ) { const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; const libxsmm_blasint bn = cfg.bn; const libxsmm_blasint bk = cfg.bk; const libxsmm_blasint lpb = 2; const libxsmm_blasint bc_lp = cfg.bc/lpb; /* const libxsmm_blasint bc = cfg.bc;*/ libxsmm_blasint use_2d_blocking = cfg.fwd_2d_blocking; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksOFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work
% cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* loop variables */ libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc); LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, wt_ptr, nBlocksIFm, bc_lp, cfg.bk, lpb); LIBXSMM_VLA_DECL(4, float, output_f32, (float*)scratch, nBlocksOFm, bn, bk); libxsmm_meltw_gemm_param gemm_eltwise_params; float* fp32_bias_scratch = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (float*)scratch + ltid * cfg.K : NULL; LIBXSMM_VLA_DECL(2, const libxsmm_bfloat16, bias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) bias_ptr : NULL, cfg.bk); LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32); libxsmm_meltwfunction_unary eltwise_kernel_act = cfg.fwd_cvtfp32bf16_relu_kernel; libxsmm_meltw_unary_param eltwise_params_act; libxsmm_meltwfunction_unary eltwise_kernel = cfg.fwd_cvtfp32bf16_kernel; libxsmm_meltw_unary_param eltwise_params; libxsmm_bmmfunction_reducebatch_strd_meltwfused bf16_batchreduce_kernel_zerobeta_fused_eltwise; libxsmm_meltw_unary_param copy_params; unsigned long long blocks = nBlocksIFm; libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1; if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) && ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) { bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd7; } else if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) { bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd4; } else if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) { bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd5; } else { bf16_batchreduce_kernel_zerobeta_fused_eltwise = NULL; } BF = cfg.fwd_bf; CB_BLOCKS = nBlocksIFm/BF; blocks = CB_BLOCKS; if (use_2d_blocking == 1) { int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksMB, hyperteam_id; col_teams = cfg.fwd_col_teams; row_teams = cfg.fwd_row_teams; hyperteam_id = ltid/(col_teams*row_teams); _nBlocksOFm = nBlocksOFm/cfg.fwd_M_hyperpartitions; _nBlocksMB = nBlocksMB/cfg.fwd_N_hyperpartitions; _ltid = ltid % (col_teams * row_teams); M_hyperpartition_id = hyperteam_id % cfg.fwd_M_hyperpartitions; N_hyperpartition_id = hyperteam_id / cfg.fwd_M_hyperpartitions; my_row_id = _ltid % row_teams; my_col_id = _ltid / row_teams; N_tasks_per_thread = (_nBlocksMB + col_teams-1)/col_teams; M_tasks_per_thread = (_nBlocksOFm + row_teams-1)/row_teams; my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksMB); my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksMB); my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksOFm); my_M_end = M_hyperpartition_id * _nBlocksOFm + 
LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, _nBlocksOFm); } /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); cfg.fwd_config_kernel(NULL, NULL, NULL); if (use_2d_blocking == 1) { if (BF > 1) { for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { if ( ifm1 == 0 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk); copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk); cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params); } else { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); cfg.fwd_zero_kernel(&copy_params); } } cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); if ( ifm1 == BF-1 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32); eltwise_kernel_act(&eltwise_params_act); } else { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); eltwise_kernel(&eltwise_params); } } } } } } else { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk); copy_params.out.primary = fp32_bias_scratch; cfg.fwd_copy_bf16fp32_kernel(&copy_params); } for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) { if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) { gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk; } if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) { gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32); } bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params); } else { cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks); } } } } } else { if (BF > 1) { for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) { for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; /* Initialize intermediate f32 tensor */ if ( ifm1 == 0 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) ==
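/* BF pipeline: when the IFm dimension is processed in BF > 1 chunks, the f32 accumulator is seeded on the first chunk (bias broadcast, or zero) and only after the last chunk is it downconverted to bf16, with ReLU and its bitmask fused in when requested. */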
MY_ELTWISE_FUSE_BIAS ) { copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk); copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk); cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params); } else { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); cfg.fwd_zero_kernel(&copy_params); } } cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); if ( ifm1 == BF-1 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32); eltwise_kernel_act(&eltwise_params_act); } else { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); eltwise_kernel(&eltwise_params); } } } } } else { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk); copy_params.out.primary = fp32_bias_scratch; cfg.fwd_copy_bf16fp32_kernel(&copy_params); } for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) { if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) { gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk; } if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) { gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32); } bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params); } else { cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks); } } } } cfg.tilerelease_kernel(NULL, NULL, NULL); libxsmm_barrier_wait(cfg.barrier, ltid); } void my_fc_bwd_exec( my_fc_bwd_config cfg, libxsmm_bfloat16* wt_ptr, libxsmm_bfloat16* din_act_ptr, const libxsmm_bfloat16* dout_act_ptr, libxsmm_bfloat16* dwt_ptr, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch, float *fil_master ) { /* size variables, all const */ /* here we assume that input and output blocking is similar */ const libxsmm_blasint bn = cfg.bn; const libxsmm_blasint bk = cfg.bk; const libxsmm_blasint bc = cfg.bc; libxsmm_blasint lpb = 2; const libxsmm_blasint bc_lp = bc/lpb; const 
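/* lpb = 2 is the VNNI packing factor for bf16: pairs of consecutive rows are interleaved so the FMA instructions consume two bf16 values per lane; bc_lp/bk_lp/bn_lp are the respective row counts after packing. */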
libxsmm_blasint bk_lp = bk/lpb; const libxsmm_blasint bn_lp = bn/lpb; const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ofm2 = 0; libxsmm_blasint performed_doutput_transpose = 0; libxsmm_meltw_unary_param trans_param; unsigned int i; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks for the doutput eltwise fusion (ReLU/transpose) that could be run in parallel */ const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work; const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work; /* number of tasks for the dbias reduction that could be run in parallel */ const libxsmm_blasint dbias_work = nBlocksOFm; /* compute chunk size */ const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work; const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work; LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dbias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) dbias_ptr : NULL, cfg.bk); libxsmm_blasint ext_blocks = (cfg.fuse_relu_bwd == 1) ? nBlocksIFm : nBlocksOFm; libxsmm_blasint int_blocks = (cfg.fuse_relu_bwd == 1) ? cfg.bc : cfg.bk; LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, ext_blocks, cfg.bn, int_blocks/32); libxsmm_bfloat16 *grad_output_ptr = (libxsmm_bfloat16*)dout_act_ptr; libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ?
(libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)scratch; LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, doutput_orig, (libxsmm_bfloat16*)dout_act_ptr, nBlocksOFm, bn, bk); libxsmm_meltw_unary_param relu_params; libxsmm_meltwfunction_unary relu_kernel = cfg.bwd_relu_kernel; LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, doutput, grad_output_ptr, nBlocksOFm, bn, bk); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, doutput_tr, tr_doutput_ptr, nBlocksMB, bn_lp, bk, lpb); libxsmm_meltwfunction_unary eltwise_kernel = cfg.bwd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary eltwise_kernel2 = cfg.upd_cvtfp32bf16_kernel; libxsmm_meltw_unary_param eltwise_params; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param delbias_params; libxsmm_meltw_gemm_param eltwise_params_bwd; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); cfg.bwd_config_kernel(NULL, NULL, NULL); if (cfg.upd_2d_blocking == 0) { /* Apply to doutput potential fusions */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) { for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1/nBlocksOFm; ofm1 = mb1ofm1%nBlocksOFm; relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32); relu_kernel(&relu_params); /* If in UPD pass, also perform transpose of doutput */ if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } } if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { performed_doutput_transpose = 1; } libxsmm_barrier_wait(cfg.barrier, ltid); } /* Accumulation of bias happens in f32 */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) { for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) { delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk); cfg.delbias_reduce_kernel(&delbias_params); } /* wait for eltwise to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } } if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ){ libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking; /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm; /* compute chunk size */ const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? 
(ltid * transpose_chunksize) : transpose_work; const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? ((ltid + 1) * transpose_chunksize) : transpose_work; /* loop variables */ libxsmm_blasint ifm1 = 0, ifm1ofm1 = 0, mb1ifm1 = 0; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16* )din_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter_tr, (libxsmm_bfloat16*)scratch, nBlocksOFm, bk_lp, bc, lpb); float* temp_output = (float*)scratch + (cfg.C * cfg.K)/2; LIBXSMM_VLA_DECL(4, float, dinput_f32, (float*) temp_output, nBlocksIFm, bn, bc); unsigned long long blocks = nBlocksOFm; libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1; BF = cfg.bwd_bf; KB_BLOCKS = nBlocksOFm/BF; blocks = KB_BLOCKS; if (use_2d_blocking == 1) { int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksIFm, _nBlocksMB, hyperteam_id; col_teams = cfg.bwd_col_teams; row_teams = cfg.bwd_row_teams; hyperteam_id = ltid/(col_teams*row_teams); _nBlocksIFm = nBlocksIFm/cfg.bwd_M_hyperpartitions; _nBlocksMB = nBlocksMB/cfg.bwd_N_hyperpartitions; _ltid = ltid % (col_teams * row_teams); M_hyperpartition_id = hyperteam_id % cfg.bwd_M_hyperpartitions; N_hyperpartition_id = hyperteam_id / cfg.bwd_M_hyperpartitions; my_row_id = _ltid % row_teams; my_col_id = _ltid / row_teams; N_tasks_per_thread = (_nBlocksMB + col_teams-1)/col_teams; M_tasks_per_thread = (_nBlocksIFm + row_teams-1)/row_teams; my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksMB); my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksMB); my_M_start = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksIFm); my_M_end = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, _nBlocksIFm); } /* transpose weight */ if (cfg.bwd_2d_blocking == 0) { for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) { ofm1 = ifm1ofm1 / nBlocksIFm; ifm1 = ifm1ofm1 % nBlocksIFm; trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } /* wait for transpose to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } if (use_2d_blocking == 1) { LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_filter_tr, ((libxsmm_bfloat16*)((char*)scratch + cfg.bwd_private_tr_wt_scratch_mark)) + ltid * bc * cfg.K, bk_lp, bc, lpb); if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (ofm2 = ofm1*KB_BLOCKS; ofm2 < (ofm1+1)*KB_BLOCKS; ofm2++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm2, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm2, 0, 0, 0, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { /* Initialize intermediate f32 tensor */ if ( ofm1 == 0 ) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
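/* bwd by data: each dX block (bn x bc) accumulates dY (bn x bk) times W^T over KB_BLOCKS output-feature blocks in f32; the accumulator is zeroed on the first block, mirroring the fwd BF pipeline. */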
cfg.bwd_zero_kernel(&copy_params); } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm1*KB_BLOCKS, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); /* downconvert intermediate f32 tensor to bf16 and store to final C */ if ( ofm1 == BF-1 ) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_kernel(&eltwise_params); if (cfg.fuse_relu_bwd > 0) { relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc/32); cfg.bwd_fused_relu_kernel(&relu_params); } } } } } } else { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (ofm2 = 0; ofm2 < nBlocksOFm; ofm2++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm2, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm2, 0, 0, 0, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { if (cfg.fuse_relu_bwd > 0) { eltwise_params_bwd.relu_bitmask_bwd = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc/32); cfg.gemm_bwd5( &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, 0, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks, &eltwise_params_bwd); } else { cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, 0, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } } } else { if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; /* Initialize intermediate f32 tensor */ if ( ofm1 == 0 ) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); cfg.bwd_zero_kernel(&copy_params); } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); /* downconvert intermediate f32 tensor to bf16 and store to final C */ if ( ofm1 == BF-1 ) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_kernel(&eltwise_params); } } } } else { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } libxsmm_barrier_wait(cfg.barrier, ltid); } if (cfg.upd_2d_blocking == 1) { /* Accumulation of bias happens in f32 */ if
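/* In the upd 2D-blocking path the bias gradient was not reduced earlier, so it happens here: delbias_reduce_kernel sums doutput over the whole minibatch per OFm block, accumulating in f32. */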
(((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) { for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) { delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk); cfg.delbias_reduce_kernel(&delbias_params); } } } if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { /* number of tasks that could be run in parallel */ const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks; const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks; const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks; const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc/ifm_subtasks; const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks; const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks; const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks; /* 2D blocking parameters */ libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; libxsmm_blasint BF = cfg.upd_bf; /* loop variables */ libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, mb3 = 0, bfn = 0, mb1ifm1 = 0; /* Batch reduce related variables */ unsigned long long blocks = nBlocksMB/BF; LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, (libxsmm_bfloat16* )in_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16*)dwt_ptr, nBlocksIFm, bc_lp, bk, lpb); /* Set up tensors for transposing/scratch before vnni reformatting dfilter */ libxsmm_bfloat16 *tr_inp_ptr = (libxsmm_bfloat16*) ((libxsmm_bfloat16*)scratch + cfg.N * cfg.K); float *dfilter_f32_ptr = (float*) ((libxsmm_bfloat16*)tr_inp_ptr + cfg.N * cfg.C); #ifndef BYPASS_SGD libxsmm_bfloat16 *dfilter_scratch = (libxsmm_bfloat16*) ((float*)dfilter_f32_ptr + cfg.C * cfg.K) + ltid * bc * bk; #endif LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, input_tr, (libxsmm_bfloat16*)tr_inp_ptr, nBlocksMB, bc, bn); LIBXSMM_VLA_DECL(4, float, dfilter_f32, (float*)dfilter_f32_ptr, nBlocksIFm, bc, bk); #ifndef BYPASS_SGD LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dfilter_block, (libxsmm_bfloat16*)dfilter_scratch, bk); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, float, master_filter, (float*)fil_master, nBlocksIFm, bc, bk); #endif const libxsmm_blasint tr_out_work = nBlocksMB * nBlocksOFm; const libxsmm_blasint tr_out_chunksize = (tr_out_work % cfg.threads == 0) ? (tr_out_work / cfg.threads) : ((tr_out_work / cfg.threads) + 1); const libxsmm_blasint tr_out_thr_begin = (ltid * tr_out_chunksize < tr_out_work) ? (ltid * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_out_thr_end = ((ltid + 1) * tr_out_chunksize < tr_out_work) ? ((ltid + 1) * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_inp_work = nBlocksMB * nBlocksIFm; const libxsmm_blasint tr_inp_chunksize = (tr_inp_work % cfg.threads == 0) ? 
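/* Weight update: both GEMM operands are pre-transposed, activations to [ifm][mb] (norm-to-normT) and doutput to VNNI, so each dW block (bc x bk) = X_block^T * dY_block can be formed as one batch-reduce GEMM over minibatch blocks. */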
(tr_inp_work / cfg.threads) : ((tr_inp_work / cfg.threads) + 1); const libxsmm_blasint tr_inp_thr_begin = (ltid * tr_inp_chunksize < tr_inp_work) ? (ltid * tr_inp_chunksize) : tr_inp_work; const libxsmm_blasint tr_inp_thr_end = ((ltid + 1) * tr_inp_chunksize < tr_inp_work) ? ((ltid + 1) * tr_inp_chunksize) : tr_inp_work; if (use_2d_blocking == 1) { int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksIFm, hyperteam_id; col_teams = cfg.upd_col_teams; row_teams = cfg.upd_row_teams; hyperteam_id = ltid/(col_teams*row_teams); _nBlocksOFm = nBlocksOFm/cfg.upd_M_hyperpartitions; _nBlocksIFm = nBlocksIFm/cfg.upd_N_hyperpartitions; _ltid = ltid % (col_teams * row_teams); M_hyperpartition_id = hyperteam_id % cfg.upd_M_hyperpartitions; N_hyperpartition_id = hyperteam_id / cfg.upd_M_hyperpartitions; my_row_id = _ltid % row_teams; my_col_id = _ltid / row_teams; N_tasks_per_thread = (_nBlocksIFm + col_teams-1)/col_teams; M_tasks_per_thread = (_nBlocksOFm + row_teams-1)/row_teams; my_N_start = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksIFm); my_N_end = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksIFm); my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksOFm); my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, _nBlocksOFm); } if (cfg.upd_2d_blocking == 0) { /* Required upfront transposes */ for (mb1ifm1 = tr_inp_thr_begin; mb1ifm1 < tr_inp_thr_end; mb1ifm1++) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, mb1, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } if (cfg.upd_2d_blocking == 0) { if (performed_doutput_transpose == 0) { for (mb1ofm1 = tr_out_thr_begin; mb1ofm1 < tr_out_thr_end; mb1ofm1++) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } } libxsmm_barrier_wait(cfg.barrier, ltid); } if (use_2d_blocking == 1) { LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_input_tr, ((libxsmm_bfloat16*)((char*)scratch + cfg.upd_private_tr_act_scratch_mark)) + ltid * bc * cfg.N * N_tasks_per_thread, nBlocksMB, bc, bn); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_doutput_tr, ((libxsmm_bfloat16*)((char*)scratch + cfg.upd_private_tr_dact_scratch_mark)) + ltid * bk * cfg.N, bn_lp, bk, lpb); ifm2 = 0; ofm2 = 0; if (BF == 1) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { /* Transpose output block */ for (mb3 = 0; mb3 < nBlocksMB; mb3++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb3, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, mb3, 0, 0, 0, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* Transpose input block */ if (ofm1 == my_M_start) { for (mb3 = 0; mb3 < nBlocksMB; mb3++) { trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb3, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, mb3, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } if ((bc % 16 == 0) && (bk
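/* The bc/bk multiple-of-16 test selects the fast path in which the update GEMM writes dW directly in VNNI layout; otherwise dW goes to a per-thread scratch block and is reformatted to VNNI afterwards (norm_to_vnni_kernel_wt). */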
% 16 == 0)) { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, 0, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, 0, 0, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } } else { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, 0, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, 0, 0, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(2, dfilter_block, 0, 0, bk), &blocks); trans_param.in.primary = &LIBXSMM_VLA_ACCESS(2, dfilter_block, 0, 0, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); cfg.norm_to_vnni_kernel_wt(&trans_param); { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } } } } } else { for (bfn = 0; bfn < BF; bfn++) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { /* Transpose output block */ for (mb3 = bfn*blocks; mb3 < (bfn+1)*blocks; mb3++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb3, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, mb3, 0, 0, 0, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* Transpose input block */ if (ofm1 == my_M_start) { for (mb3 = bfn*blocks; mb3 < (bfn+1)*blocks; mb3++) { trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb3, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, mb3, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, bfn*blocks, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); /* Downconvert result to BF16 and vnni format */ if (bfn == BF-1) { LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64); eltwise_params.in.primary = 
&LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); eltwise_params.out.primary = tmp_buf; trans_param.in.primary = tmp_buf; trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); eltwise_kernel2(&eltwise_params); cfg.norm_to_vnni_kernel_wt(&trans_param); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } #endif } } } } } } else { if (BF == 1) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } #endif } } else { for (bfn = 0; bfn < BF; bfn++) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); /* Downconvert result to BF16 and vnni format */ if (bfn == BF-1) { LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64); eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk); eltwise_params.out.primary = tmp_buf; trans_param.in.primary = tmp_buf; trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb); eltwise_kernel2(&eltwise_params); 
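/* After the last BF chunk: downconvert the f32 weight gradient to bf16 into a stack buffer, reformat it into the VNNI dfilter tensor, then (unless BYPASS_SGD is defined) apply the fused SGD step on the fp32 master weights and requantize them to bf16. */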
cfg.norm_to_vnni_kernel_wt(&trans_param); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } #endif } } } } } libxsmm_barrier_wait(cfg.barrier, ltid); } cfg.tilerelease_kernel(NULL, NULL, NULL); } void my_opt_exec( my_opt_config cfg, libxsmm_bfloat16* wt_ptr, float* master_wt_ptr, const libxsmm_bfloat16* delwt_ptr, int start_tid, int my_tid, void* scratch ) { /* loop counters */ libxsmm_blasint i; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; const libxsmm_blasint bk = cfg.bk; const libxsmm_blasint bc = cfg.bc; libxsmm_blasint lpb = 2; const libxsmm_blasint bc_lp = bc/lpb; const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk; /* number of tasks that could run in parallel for the filters */ const libxsmm_blasint work = cfg.C * cfg.K; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? 
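/* Optimizer: plain SGD with an fp32 master copy as the source of truth: w_master -= lr * dW (dW upconverted from bf16), after which the bf16 working copy is refreshed by rounding the new master value. */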
((ltid + 1) * chunksize) : work; /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); #if defined(__AVX512BW__) __m512 vlr = _mm512_set1_ps( cfg.lr ); if (cfg.opt_2d_blocking == 1) { libxsmm_blasint ofm1, ifm1; libxsmm_blasint col_teams = cfg.opt_col_teams; libxsmm_blasint row_teams = cfg.opt_row_teams; libxsmm_blasint my_row_id = ltid % row_teams; libxsmm_blasint my_col_id = ltid / row_teams; libxsmm_blasint N_tasks_per_thread = (nBlocksIFm + col_teams-1)/col_teams; libxsmm_blasint M_tasks_per_thread = (nBlocksOFm + row_teams-1)/row_teams; libxsmm_blasint my_N_start = LIBXSMM_MIN( my_col_id * N_tasks_per_thread, nBlocksIFm); libxsmm_blasint my_N_end = LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, nBlocksIFm); libxsmm_blasint my_M_start = LIBXSMM_MIN( my_row_id * M_tasks_per_thread, nBlocksOFm); libxsmm_blasint my_M_end = LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, nBlocksOFm); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16*)delwt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, float, master_filter, (float*)master_wt_ptr, nBlocksIFm, bc, bk); libxsmm_bfloat16 *wt_bf16, *dwt_bf16; float *wt_fp32; for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } } } else { libxsmm_blasint iv = ( (thr_end-thr_begin)/16 ) * 16; /* compute iterations which are vectorizable */ for ( i = thr_begin; i < thr_begin+iv; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( master_wt_ptr+i ), _mm512_mul_ps( vlr, _mm512_load_fil( delwt_ptr + i ) ) ); _mm512_store_fil( wt_ptr+i, newfilter ); _mm512_storeu_ps( master_wt_ptr+i, newfilter ); } for ( i = thr_begin+iv; i < thr_end; ++i ) { libxsmm_bfloat16_hp t1, t2; t1.i[0] =0; t1.i[1] = delwt_ptr[i]; master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f); t2.f = master_wt_ptr[i]; wt_ptr[i] = t2.i[1]; } } #else for ( i = thr_begin; i < thr_end; ++i ) { libxsmm_bfloat16_hp t1, t2; t1.i[0] =0; t1.i[1] = delwt_ptr[i]; master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f); t2.f = master_wt_ptr[i]; wt_ptr[i] = t2.i[1]; } #endif libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_smax_fwd_exec( my_smax_fwd_config cfg, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? 
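/* Softmax forward, computed in f32 scratch: per row, p_i = exp(x_i - max_j x_j) / sum_j exp(x_j - max_j x_j); afterwards thread 0 computes the NLL loss, loss = -(1/N) * sum_n log p_n[label_n]. */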
(n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const libxsmm_blasint nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const libxsmm_blasint nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; libxsmm_bfloat16* poutput_bf16 = out_act_ptr; const libxsmm_bfloat16* pinput_bf16 = in_act_ptr; float* poutput_fp32 = (float*)scratch; float* pinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C); LIBXSMM_VLA_DECL(4, float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, const float, input, pinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.i[0] = 0; in.i[1] = pinput_bf16[i]; pinput_fp32[i] = in.f; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { float max = -FLT_MAX; /* start at the most negative float so rows with all-negative inputs still get a correct max */ float sum_of_exp = 0.0f; img1 = i/bn; img2 = i%bn; /* copy input to output and compute the max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) { max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } } /* sum exp over outputs */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) ); sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } /* scale output */ sum_of_exp = 1.0f/sum_of_exp; for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp; } } } libxsmm_barrier_wait( cfg.barrier, ltid ); /* calculate loss single threaded */ if ( ltid == 0 ) { (*loss) = 0.0f; for ( img1 = 0; img1 < Bn; ++img1 ) { for ( img2 = 0; img2 < bn; ++img2 ) { libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ); libxsmm_blasint ifm1b = ifm/bc; libxsmm_blasint ifm2b = ifm%bc; float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ?
LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN; *loss += LIBXSMM_LOGF( val ); } } *loss = ((-1.0f)*(*loss))/cfg.N; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.f = poutput_fp32[i]; poutput_bf16[i] = in.i[1]; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_vnni_reformat_exec( my_vnni_reformat_config cfg, libxsmm_bfloat16* delin_act_ptr, libxsmm_bfloat16* tr_delin_act_ptr, unsigned char* relu_ptr, int start_tid, int my_tid ) { /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; const libxsmm_blasint bn = cfg.bn; const libxsmm_blasint bc = cfg.bc; const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; libxsmm_blasint lpb = 2; const libxsmm_blasint bn_lp = bn/lpb; libxsmm_blasint mb1ifm1, mb1, ifm1; libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_param relu_params; LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksIFm, cfg.bn, cfg.bc/32); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, tr_dinput, (libxsmm_bfloat16* )tr_delin_act_ptr, nBlocksMB, bn_lp, bc, lpb); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16* )delin_act_ptr, nBlocksIFm, bn, bc); /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; LIBXSMM_UNUSED( trans_param ); LIBXSMM_UNUSED( tr_dinput_ ); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) { relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc/32); cfg.fused_relu_kernel(&relu_params); } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_smax_bwd_exec( my_smax_bwd_config cfg, libxsmm_bfloat16* delin_act_ptr, const libxsmm_bfloat16* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; float rcp_N = 1.0f/cfg.N; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? 
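/* Softmax+NLL backward: the fused gradient collapses to dx_i = (p_i - [i == label]) * loss_weight / N, which the loop below evaluates in f32 before requantizing to bf16. */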
((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const int nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const int nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const int nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; const libxsmm_bfloat16* poutput_bf16 = out_act_ptr; libxsmm_bfloat16* pdinput_bf16 = delin_act_ptr; float* poutput_fp32 = (float*)scratch; float* pdinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C); LIBXSMM_VLA_DECL(4, const float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, float, dinput, pdinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp out; out.i[0] = 0; out.i[1] = poutput_bf16[i]; poutput_fp32[i] = out.f; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { img1 = i/bn; img2 = i%bn; /* compute the fused softmax+NLL gradient: (p - onehot(label)) * loss_weight / N */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { if ( (ifm1*bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight; } else { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight; } } } } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.f = pdinput_fp32[i]; pdinput_bf16[i] = in.i[1]; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void init_master_weights( my_opt_config cfg, float* master_wt_ptr, size_t size) { #if 0 if (0/* && cfg.upd_N_hyperpartitions != 1 */) { /*TODO: add hyperpartitions (?)*/ /* Spread out weights in a blocked fashion since we partition the MODEL dimension */ init_buffer_block_numa((libxsmm_bfloat16*) master_wt_ptr, size/2); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa((libxsmm_bfloat16*) master_wt_ptr, size/2); } #endif } void init_weights( my_fc_fwd_config cfg, libxsmm_bfloat16* wt_ptr, size_t size) { if (cfg.fwd_M_hyperpartitions != 1) { /* Spread out weights in a blocked fashion since we partition the MODEL dimension */ init_buffer_block_numa(wt_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(wt_ptr, size); } } void init_dweights( my_fc_bwd_config cfg, libxsmm_bfloat16* dwt_ptr, size_t size) { if (cfg.upd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(dwt_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(dwt_ptr, size); } } void init_acts( my_fc_fwd_config cfg, libxsmm_bfloat16* act_ptr, size_t size) { if (cfg.fwd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(act_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(act_ptr, size); } } void init_delacts( my_fc_bwd_config cfg, libxsmm_bfloat16* delact_ptr, size_t size) { if (cfg.bwd_N_hyperpartitions != 1) { /* Spread out weights */
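/* NUMA-aware first-touch initialization via helpers defined earlier in this driver: block-wise when the tensor is partitioned across hyperpartitions (presumably so pages land on the node of the owning team), block-cyclic otherwise. */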
init_buffer_block_numa(delact_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(delact_ptr, size); } } int main(int argc, char* argv[]) { libxsmm_bfloat16 **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm; libxsmm_bfloat16 **bias_libxsmm, **delbias_libxsmm; float **fil_master; unsigned char **relumask_libxsmm; int *label_libxsmm; my_eltwise_fuse my_fuse; my_fc_fwd_config* my_fc_fwd; my_fc_bwd_config* my_fc_bwd; my_opt_config* my_opt; my_smax_fwd_config my_smax_fwd; my_smax_bwd_config my_smax_bwd; my_vnni_reformat_config my_vnni_reformat; void* scratch = NULL; size_t scratch_size = 0; /* some parameters we can overwrite via cli, default is some inner layer of overfeat */ int iters = 10; /* repetitions of benchmark */ int MB = 32; /* mini-batch size, "N" */ int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */ int bn = 64; int bk = 64; int bc = 64; int *C; /* number of input feature maps, "C" */ int num_layers = 0; const char *const env_check = getenv("CHECK"); const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check)); #if defined(_OPENMP) int nThreads = omp_get_max_threads(); /* number of threads */ #else int nThreads = 1; /* number of threads */ #endif unsigned long long l_start, l_end; double l_total = 0.0; double gflop = 0.0; int i, j; double act_size = 0.0; double fil_size = 0.0; float lr = 0.1f; float loss_weight = 1.0f; float loss = 0.0; libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff; libxsmm_matdiff_clear(&norms_fwd); libxsmm_matdiff_clear(&norms_bwd); libxsmm_matdiff_clear(&norms_upd); libxsmm_matdiff_clear(&diff); char* env_threads_per_numa; if (argc > 1 && !strncmp(argv[1], "-h", 3)) { printf("Usage: %s iters MB bn bk bc C1 C2 ... CN\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* reading new values from cli */ i = 1; num_layers = argc - 7; if (argc > i) iters = atoi(argv[i++]); if (argc > i) MB = atoi(argv[i++]); if (argc > i) bn = atoi(argv[i++]); if (argc > i) bk = atoi(argv[i++]); if (argc > i) bc = atoi(argv[i++]); /* allocate the number of channels buffer */ if ( num_layers < 1 ) { printf("Usage: %s iters MB bn bk bc C1 C2 ...
CN\n", argv[0]); return 0; } C = (int*)malloc((num_layers+2)*sizeof(int)); for (j = 0 ; i < argc; ++i, ++j ) { C[j] = atoi(argv[i]); } /* handle softmax config */ C[num_layers+1] = C[num_layers]; #if defined(__SSE3__) _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); #endif /* Read env variables */ env_threads_per_numa = getenv("THREADS_PER_NUMA"); if ( 0 == env_threads_per_numa ) { printf("please specify THREADS_PER_NUMA to a non-zero value!\n"); return -1; } else { threads_per_numa = atoi(env_threads_per_numa); } /* print some summary */ printf("##########################################\n"); printf("# Setting Up (Common) #\n"); printf("##########################################\n"); printf("PARAMS: N:%d\n", MB); printf("PARAMS: Layers: %d\n", num_layers); printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n"); for (i = 0; i < num_layers; ++i ) { if (i == 0) { act_size += (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); } act_size += (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); fil_size += (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); } act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0); printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE Filter (incl. 
master): %10.2f MiB\n", 3.0*fil_size ); printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size ); printf("TOTAL SIZE MLP: %10.2f MiB\n", (4.0*fil_size) + (2.0*act_size) ); /* allocate data */ act_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+2)*sizeof(libxsmm_bfloat16*) ); delact_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+1)*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers+2; ++i ) { act_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152); /* softmax has no incoming gradients */ if ( i < num_layers+1 ) { delact_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152); } } fil_master = (float**) malloc( num_layers*sizeof(float*) ); fil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); delfil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers; ++i ) { fil_master[i] = (float*) libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152); fil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152); delfil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152); } bias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); delbias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers; ++i ) { bias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152); delbias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152); } relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) ); for ( i = 0 ; i < num_layers; ++i ) { relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152); } label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152); printf("\n"); printf("##########################################\n"); printf("# Setting Up (custom-Storage) #\n"); printf("##########################################\n"); /* allocating handles */ my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) ); my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) ); my_opt = (my_opt_config*) malloc( num_layers*sizeof(my_opt_config) ); /* setting up handles + scratch */ size_t max_bwd_scratch_size = 0, max_doutput_scratch_mark = 0; scratch_size = 0; /* setting up handles + scratch */ for ( i = 0; i < num_layers; ++i ) { /* MNIST Specific where everywhere we use relu act except the last layer */ if ( i < num_layers -1 ) { my_fuse = MY_ELTWISE_FUSE_RELU; } else { my_fuse = MY_ELTWISE_FUSE_NONE; } my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse); my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse, lr); my_opt[i] = setup_my_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? 
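/* Blocking fallback used for every layer: if a dimension is not divisible by the default block size (bn/bc/bk), the whole dimension is treated as a single block. */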
bk : C[i+1], nThreads, lr ); if (my_fc_bwd[i].scratch_size > 0 && my_fc_bwd[i].scratch_size > max_bwd_scratch_size) { max_bwd_scratch_size = my_fc_bwd[i].scratch_size; } if (my_fc_bwd[i].doutput_scratch_mark > 0 && my_fc_bwd[i].doutput_scratch_mark > max_doutput_scratch_mark) { max_doutput_scratch_mark = my_fc_bwd[i].doutput_scratch_mark; } /* let's allocate and bind scratch */ if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size ); if ( alloc_size > scratch_size ) { scratch_size = alloc_size; } } } /* softmax+loss is treated as the (N+1)-th layer */ my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads ); my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads, loss_weight); my_vnni_reformat = setup_my_vnni_reformat(MB, C[num_layers], (MB % bn == 0) ? bn : MB, (C[num_layers] % bk == 0) ? bk : C[num_layers], nThreads, my_fuse); if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size ); if ( alloc_size > scratch_size ) { scratch_size = alloc_size; } } scratch = libxsmm_aligned_malloc( scratch_size, 2097152 ); /* init data; clamp the config index so the extra (softmax) activations reuse the last layer's setup instead of indexing past my_fc_fwd/my_fc_bwd */ for ( i = 0 ; i < num_layers+2; ++i ) { init_acts(my_fc_fwd[LIBXSMM_MIN(i, num_layers-1)], act_libxsmm[i], MB*C[i]); } for ( i = 0 ; i < num_layers+1; ++i ) { init_delacts(my_fc_bwd[LIBXSMM_MIN(i, num_layers-1)], delact_libxsmm[i], MB*C[i]); } for ( i = 0 ; i < num_layers; ++i ) { /*init_master_weights(my_opt[i], fil_master[i], C[i]*C[i+1] );*/ my_init_buf( fil_master[i], C[i]*C[i+1], 0, 0 ); libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] ); /*init_weights(my_fc_fwd[i], fil_libxsmm[i], C[i]*C[i+1]);*/ init_dweights(my_fc_bwd[i], delfil_libxsmm[i], C[i]*C[i+1]); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_bf16( bias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_bf16( delbias_libxsmm[i], C[i+1], 0, 0 ); } zero_buf_int32( label_libxsmm, MB ); /* Reading in the MNIST dataset */ int n_batches = NUM_TRAIN/MB, batch_id = 0; int n_epochs = iters, epoch_id = 0; libxsmm_bfloat16 *input_acts = (libxsmm_bfloat16*)libxsmm_aligned_malloc( NUM_TRAIN * C[0] * sizeof(libxsmm_bfloat16), 2097152); /* Read in input data */ char *train_image_path = "../mlpdriver/mnist_data/train-images.idx3-ubyte"; char *train_label_path = "../mlpdriver/mnist_data/train-labels.idx1-ubyte"; char *test_image_path = "../mlpdriver/mnist_data/t10k-images.idx3-ubyte"; char *test_label_path = "../mlpdriver/mnist_data/t10k-labels.idx1-ubyte"; load_mnist(train_image_path, train_label_path, test_image_path, test_label_path); /* Format the input layer in NCNC blocked format */ int _i, _j; for (_i = 0; _i < n_batches*MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? (float) train_image[_i][_j] : (float)0.0; int batchid = _i/MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? 
bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16( &val, cur_pos, 1 ); } } printf("###########################################\n"); printf("# Training MNIST with %d training samples #\n", n_batches*MB); printf("###########################################\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j,epoch_id,batch_id) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (epoch_id = 0; epoch_id < n_epochs; epoch_id++) { for (batch_id = 0; batch_id < n_batches; batch_id++) { for ( i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], train_label + batch_id * MB, &loss, 0, tid, scratch ); if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1 )) { printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, loss); } my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], train_label + batch_id * MB, 0, tid, scratch ); for ( i = num_layers-1; i > 0; --i) { my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], (my_fc_bwd[i].fuse_relu_bwd > 0) ? relumask_libxsmm[i-1] : relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch, fil_master[i] ); } my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], input_acts + batch_id * MB * C[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch, fil_master[0] ); } } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0); } gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0); printf("GFLOP = %.5g\n", gflop/(double)((double)n_epochs *(double)n_batches)); printf("fp time = %.5g\n", ((double)(l_total/((double)n_epochs *(double)n_batches)))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/((double)n_epochs *(double)n_batches))), gflop/l_total); #ifdef TEST_ACCURACY /* Test accuracy */ n_batches = NUM_TEST/MB; for (_i = 0; _i < n_batches * MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? (float) test_image[_i][_j] : 0.0; int batchid = _i/MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? 
bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16( &val, cur_pos, 1 ); } } n_batches = NUM_TEST/MB; unsigned int hits = 0; unsigned int samples = 0; #if defined(_OPENMP) # pragma omp parallel private(i,j,batch_id) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (batch_id = 0; batch_id < n_batches; batch_id++) { for ( i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], test_label + batch_id * MB, &loss, 0, tid, scratch ); if (tid == 0) { for (_i = 0; _i < MB; _i++) { int label = *(test_label + batch_id * MB + _i); int max_id = 0; float max_val = 0.0; libxsmm_convert_bf16_f32( act_libxsmm[num_layers+1] + _i * 10, &max_val, 1 ); /* Find predicted label */ for (_j = 1; _j < 10; _j++) { libxsmm_bfloat16 val = *(act_libxsmm[num_layers+1] + _i * 10 + _j); float f32_val; libxsmm_convert_bf16_f32( &val, &f32_val, 1 ); if (f32_val > max_val) { max_id = _j; max_val = f32_val; } } /* Compare with true label */ if (max_id == label) { hits++; } samples++; } } #pragma omp barrier } } printf("Accuracy is %f %% (%d test samples)\n", (1.0*hits)/(1.0*samples)*100.0, samples); #endif /* deallocate data */ if ( scratch != NULL ) { libxsmm_free(scratch); } for ( i = 0; i < num_layers; ++i ) { if ( i == 0 ) { libxsmm_free(act_libxsmm[i]); libxsmm_free(delact_libxsmm[i]); } libxsmm_free(act_libxsmm[i+1]); libxsmm_free(delact_libxsmm[i+1]); libxsmm_free(fil_libxsmm[i]); libxsmm_free(delfil_libxsmm[i]); libxsmm_free(bias_libxsmm[i]); libxsmm_free(delbias_libxsmm[i]); libxsmm_free(relumask_libxsmm[i]); libxsmm_free(fil_master[i]); } libxsmm_free(act_libxsmm[num_layers+1]); libxsmm_free(label_libxsmm); libxsmm_free(input_acts); free( my_opt ); free( my_fc_fwd ); free( my_fc_bwd ); free( act_libxsmm ); free( delact_libxsmm ); free( fil_master ); free( fil_libxsmm ); free( delfil_libxsmm ); free( bias_libxsmm ); free( delbias_libxsmm ); free( relumask_libxsmm ); free( C ); /* some empty lines at the end */ printf("\n\n\n"); return 0; }
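/* Illustrative sketch (not part of the original driver): the two input-formatting
 * loops above compute a flat offset into the NCNC-blocked activation tensor by
 * hand. The helper below reproduces exactly that arithmetic for one
 * (sample, feature) pair; the name ncnc_offset is hypothetical and the function
 * is unused by the driver. */
static size_t ncnc_offset(int batchid, int mb, int feat,
                          int MB_, int C0, int bn_, int bc_)
{
  return (size_t)batchid * MB_ * C0       /* start of this mini-batch     */
       + (size_t)(mb   / bn_) * C0 * bn_  /* row block of the sample      */
       + (size_t)(feat / bc_) * bn_ * bc_ /* column block of the feature  */
       + (size_t)(mb   % bn_) * bc_       /* row inside the block         */
       + (size_t)(feat % bc_);            /* column inside the block      */
}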
mixed_tentusscher_myo_epi_2004_S2.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt + Rc) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions (resting potential must be negative) real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6902768323595,0.00125688376225555,0.782690257165761,0.782547892596001,0.000171750048746746,0.486360170563085,0.00291485827479809,0.999998387931464,1.89456679295569e-08,1.86054940017131e-05,0.999770742626069,1.00724037170339,0.999997113579370,4.17567836043613e-05,0.472458747863693,10.1478189383772,139.471917130272}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = 
sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.2265776064284,0.000280045021984329,0.000123702304592752,0.000251556675811958,0.224623739779267,0.145045477736859,0.132102752427711,4.42712254301024,0.0156948843567210,1.61691730440283,1100,0.000520888772463349,0.258756467150201,0.0191544497099730,0.00137164828832637,4.52996729499983e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); 
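/* The four reversal (Nernst) potentials just computed follow
   E_X = (R*T/(z*F)) * ln([X]_o/[X]_i): z = 1 for Na and K, z = 2 for Ca
   (hence the extra factor 0.5), and E_Ks uses a pKNa-weighted Na/K mixture.
   RTONF = R*T/F is in millivolts, so every potential used below is in mV. */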
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
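/* Illustrative sketch (not part of the original model file): every gate update
 * above uses the Rush-Larsen scheme, i.e. the exact solution of the linear gate
 * ODE dx/dt = (x_inf - x)/tau over one step dt, rather than a forward-Euler
 * step. The helper name rush_larsen_step is hypothetical and the function is
 * not called by the solver. */
static real rush_larsen_step(real x, real x_inf, real tau, real dt)
{
    /* exact update of dx/dt = (x_inf - x)/tau over [t, t+dt] */
    return x_inf - (x_inf - x) * exp(-dt / tau);
}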
1.norace7.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for schedule(auto) for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i][j - 1]; } // CHECK: Region is Data Race Free. // END
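// For contrast (illustrative, kept in a comment so the single CHECK line
// above still describes this file): the dependence A[i][j] = A[i][j - 1]
// is carried by the inner j loop only, which is why parallelizing the outer
// i loop is race free. Parallelizing j instead would let iteration j read
// A[i][j - 1] while iteration j - 1 is still writing it:
//
//   #pragma omp parallel for
//   for (int j = 1; j < N; j++)
//     A[i][j] = A[i][j - 1]; // loop-carried dependence -> data race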
periodic_function.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file periodic_function.h * * \brief Contains declaration and partial implementation of sirius::Periodic_function class. */ #ifndef __PERIODIC_FUNCTION_H__ #define __PERIODIC_FUNCTION_H__ #include "simulation_context.h" #include "spheric_function.h" #include "smooth_periodic_function.h" namespace sirius { /// Representation of a periodic function on the muffin-tin geometry. /** Inside each muffin-tin a spherical expansion is used: * \f[ * f({\bf r}) = \sum_{\ell m} f_{\ell m}(r) Y_{\ell m}(\hat {\bf r}) * \f] * or * \f[ * f({\bf r}) = \sum_{\ell m} f_{\ell m}(r) R_{\ell m}(\hat {\bf r}) * \f] * In the interstitial region the function is stored on the real-space grid or as a Fourier series: * \f[ * f({\bf r}) = \sum_{{\bf G}} f({\bf G}) e^{i{\bf G}{\bf r}} * \f] */ template<typename T> class Periodic_function: public Smooth_periodic_function<T> { private: /// Complex counterpart for a given type T. typedef typename type_wrapper<T>::complex_t complex_t; Simulation_context const& ctx_; Unit_cell const& unit_cell_; Step_function const& step_function_; Communicator const& comm_; /// Local part of muffin-tin functions. mdarray<Spheric_function<spectral, T>, 1> f_mt_local_; /// Global muffin-tin array mdarray<T, 3> f_mt_; Gvec const& gvec_; /// Angular domain size of the muffin-tin functions. 
int angular_domain_size_; /// Set pointer to local part of muffin-tin functions void set_local_mt_ptr() { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { int ia = unit_cell_.spl_num_atoms(ialoc); f_mt_local_(ialoc) = Spheric_function<spectral, T>(&f_mt_(0, 0, ia), angular_domain_size_, unit_cell_.atom(ia).radial_grid()); } } /* forbid copy constructor */ Periodic_function(const Periodic_function<T>& src) = delete; /* forbid assignment operator */ Periodic_function<T>& operator=(const Periodic_function<T>& src) = delete; public: /// Constructor Periodic_function(Simulation_context& ctx__, int angular_domain_size__) : Smooth_periodic_function<T>(ctx__.fft(), ctx__.gvec_partition()) , ctx_(ctx__) , unit_cell_(ctx__.unit_cell()) , step_function_(ctx__.step_function()) , comm_(ctx__.comm()) , gvec_(ctx__.gvec()) , angular_domain_size_(angular_domain_size__) { if (ctx_.full_potential()) { f_mt_local_ = mdarray<Spheric_function<spectral, T>, 1>(unit_cell_.spl_num_atoms().local_size()); } } /// Allocate memory for muffin-tin part. void allocate_mt(bool allocate_global__) { if (ctx_.full_potential()) { if (allocate_global__) { f_mt_ = mdarray<T, 3>(angular_domain_size_, unit_cell_.max_num_mt_points(), unit_cell_.num_atoms(), memory_t::host, "f_mt_"); set_local_mt_ptr(); } else { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { int ia = unit_cell_.spl_num_atoms(ialoc); f_mt_local_(ialoc) = Spheric_function<spectral, T>(angular_domain_size_, unit_cell_.atom(ia).radial_grid()); } } } } /// Synchronize global muffin-tin array. void sync_mt() { PROFILE("sirius::Periodic_function::sync_mt"); assert(f_mt_.size() != 0); int ld = angular_domain_size_ * unit_cell_.max_num_mt_points(); comm_.allgather(&f_mt_(0, 0, 0), ld * unit_cell_.spl_num_atoms().global_offset(), ld * unit_cell_.spl_num_atoms().local_size()); } /// Zero the function. 
void zero() { f_mt_.zero(); this->f_rg_.zero(); this->f_pw_local_.zero(); if (ctx_.full_potential()) { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { f_mt_local_(ialoc).zero(); } } } inline void copy_to_global_ptr(T* f_mt__, T* f_it__) const { std::memcpy(f_it__, this->f_rg_.template at<CPU>(), this->fft_->local_size() * sizeof(T)); if (ctx_.full_potential()) { mdarray<T, 3> f_mt(f_mt__, angular_domain_size_, unit_cell_.max_num_mt_points(), unit_cell_.num_atoms()); for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { int ia = unit_cell_.spl_num_atoms(ialoc); std::memcpy(&f_mt(0, 0, ia), &f_mt_local_(ialoc)(0, 0), f_mt_local_(ialoc).size() * sizeof(T)); } int ld = angular_domain_size_ * unit_cell_.max_num_mt_points(); comm_.allgather(f_mt__, ld * unit_cell_.spl_num_atoms().global_offset(), ld * unit_cell_.spl_num_atoms().local_size()); } } using Smooth_periodic_function<T>::add; /// Add the function void add(Periodic_function<T>* g) { PROFILE("sirius::Periodic_function::add"); /* add regular-grid part */ Smooth_periodic_function<T>::add(*g); /* add muffin-tin part */ if (ctx_.full_potential()) { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) f_mt_local_(ialoc) += g->f_mt(ialoc); } } T integrate(std::vector<T>& mt_val, T& it_val) { PROFILE("sirius::Periodic_function::integrate"); it_val = 0; if (!ctx_.full_potential()) { #pragma omp parallel { T it_val_t = 0; #pragma omp for schedule(static) for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { it_val_t += this->f_rg_(irloc); } #pragma omp critical it_val += it_val_t; } } else { for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { it_val += this->f_rg_(irloc) * step_function_.theta_r(irloc); } } it_val *= (unit_cell_.omega() / this->fft_->size()); this->fft_->comm().allreduce(&it_val, 1); T total = it_val; if (ctx_.full_potential()) { mt_val = std::vector<T>(unit_cell_.num_atoms(), 0); for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { int ia = unit_cell_.spl_num_atoms(ialoc); mt_val[ia] = f_mt_local_(ialoc).component(0).integrate(2) * fourpi * y00; } comm_.allreduce(&mt_val[0], unit_cell_.num_atoms()); for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) { total += mt_val[ia]; } } return total; } template <index_domain_t index_domain> inline T& f_mt(int idx0, int ir, int ia) { switch (index_domain) { case index_domain_t::local: { return f_mt_local_(ia)(idx0, ir); } case index_domain_t::global: { return f_mt_(idx0, ir, ia); } } } template <index_domain_t index_domain> inline T const& f_mt(int idx0, int ir, int ia) const { switch (index_domain) { case index_domain_t::local: { return f_mt_local_(ia)(idx0, ir); } case index_domain_t::global: { return f_mt_(idx0, ir, ia); } } } /** \todo write and read distributed functions */ void hdf5_write(std::string storage_file_name__, std::string path__) { auto v = this->gather_f_pw(); if (ctx_.comm().rank() == 0) { HDF5_tree fout(storage_file_name__, hdf5_access_t::read_write); fout[path__].write("f_pw", reinterpret_cast<double*>(v.data()), static_cast<int>(v.size() * 2)); if (ctx_.full_potential()) { fout[path__].write("f_mt", f_mt_); } } } void hdf5_read(HDF5_tree h5f__, mdarray<int, 2>& gvec__) { std::vector<double_complex> v(gvec_.num_gvec()); h5f__.read("f_pw", reinterpret_cast<double*>(v.data()), static_cast<int>(v.size() * 2)); std::map<vector3d<int>, int> local_gvec_mapping; for (int igloc = 0; igloc < gvec_.count(); igloc++) { int ig = gvec_.offset() + 
igloc; auto G = gvec_.gvec(ig); local_gvec_mapping[G] = igloc; } for (int ig = 0; ig < gvec_.num_gvec(); ig++) { vector3d<int> G(&gvec__(0, ig)); if (local_gvec_mapping.count(G) != 0) { this->f_pw_local_[local_gvec_mapping[G]] = v[ig]; } } if (ctx_.full_potential()) { h5f__.read("f_mt", f_mt_); } } /// Set the global pointer to the muffin-tin part void set_mt_ptr(T* mt_ptr__) { f_mt_ = mdarray<T, 3>(mt_ptr__, angular_domain_size_, unit_cell_.max_num_mt_points(), unit_cell_.num_atoms(), "f_mt_"); set_local_mt_ptr(); } /// Set the pointer to the interstitial part void set_rg_ptr(T* rg_ptr__) { this->f_rg_ = mdarray<T, 1>(rg_ptr__, this->fft_->local_size()); } inline Spheric_function<spectral, T> const& f_mt(int ialoc__) const { return f_mt_local_(ialoc__); } double value(vector3d<double>& vc) { int ja{-1}, jr{-1}; double dr{0}, tp[2]; if (unit_cell_.is_point_in_mt(vc, ja, jr, dr, tp)) { int lmax = Utils::lmax_by_lmmax(angular_domain_size_); std::vector<double> rlm(angular_domain_size_); SHT::spherical_harmonics(lmax, tp[0], tp[1], &rlm[0]); double p{0}; for (int lm = 0; lm < angular_domain_size_; lm++) { double d = (f_mt_(lm, jr + 1, ja) - f_mt_(lm, jr, ja)) / unit_cell_.atom(ja).type().radial_grid().dx(jr); p += rlm[lm] * (f_mt_(lm, jr, ja) + d * dr); } return p; } else { STOP(); double p{0}; //for (int ig = 0; ig < gvec_.num_gvec(); ig++) { // vector3d<double> vgc = gvec_.gvec_cart(ig); // p += std::real(f_pw_(ig) * std::exp(double_complex(0.0, vc * vgc))); //} return p; } } mdarray<T, 3>& f_mt() { return f_mt_; } /// Compute inner product <f|g> T inner(Periodic_function<T> const* g__) const { PROFILE("sirius::Periodic_function::inner"); assert(this->fft_ == g__->fft_); assert(&step_function_ == &g__->step_function_); assert(&unit_cell_ == &g__->unit_cell_); assert(&comm_ == &g__->comm_); T result_rg{0}; if (!ctx_.full_potential()) { Smooth_periodic_function<T> const& tmp = *g__; result_rg = Smooth_periodic_function<T>::inner(tmp); //#pragma omp parallel //{ // T rt{0}; // // #pragma omp for schedule(static) // for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { // rt += std::conj(this->f_rg(irloc)) * g__->f_rg(irloc); // } // #pragma omp critical // result_rg += rt; //} } else { for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { result_rg += type_wrapper<T>::bypass(std::conj(this->f_rg(irloc))) * g__->f_rg(irloc) * this->step_function_.theta_r(irloc); } result_rg *= (unit_cell_.omega() / this->fft_->size()); this->fft_->comm().allreduce(&result_rg, 1); } T result_mt{0}; if (ctx_.full_potential()) { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { auto r = sirius::inner(f_mt(ialoc), g__->f_mt(ialoc)); result_mt += r; } comm_.allreduce(&result_mt, 1); } return result_mt + result_rg; } }; } #endif // __PERIODIC_FUNCTION_H__
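/* Illustrative usage sketch (not part of the header, hence kept in a comment
 * after the include guard): a full-potential scalar field would typically be
 * created, allocated and integrated as below. Obtaining a valid
 * Simulation_context is elided and the variable names ctx and lmmax are
 * assumptions.
 *
 *   sirius::Periodic_function<double> f(ctx, lmmax);
 *   f.allocate_mt(true);   // global muffin-tin array + local pointers
 *   f.zero();              // clear MT, real-space and plane-wave parts
 *   std::vector<double> mt_val;
 *   double it_val{0};
 *   double total = f.integrate(mt_val, it_val); // MT + interstitial parts
 */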
GB_unop__identity_uint64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_fc64) // op(A') function: GB (_unop_tran__identity_uint64_fc64) // C type: uint64_t // A type: GxB_FC64_t // cast: uint64_t cij = GB_cast_to_uint64_t (creal (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_fc64) ( uint64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
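/* Illustrative sketch (not part of the generated file): this kernel is an
 * internal worker that is normally dispatched by GrB_apply rather than called
 * directly. For a small dense (non-bitmap) array the apply loop reduces to the
 * element-wise cast below; the direct call is for exposition only.
 *
 *   GxB_FC64_t Ax [2] = { GxB_CMPLX (3.7, -1.0), GxB_CMPLX (8.0, 2.0) } ;
 *   uint64_t   Cx [2] ;
 *   GrB_Info info = GB (_unop_apply__identity_uint64_fc64)
 *       (Cx, Ax, NULL, 2, 1) ;
 *   // on success, Cx [p] == GB_cast_to_uint64_t (creal (Ax [p])) for each p
 */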