source
stringlengths
3
92
c
stringlengths
26
2.25M
gimplify.c
/* Tree lowering pass. This pass converts the GENERIC functions-as-trees tree representation into the GIMPLE form. Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. Major work done by Sebastian Pop <s.pop@laposte.net>, Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "varray.h" #include "tree-gimple.h" #include "tree-inline.h" #include "diagnostic.h" #include "langhooks.h" #include "langhooks-def.h" #include "tree-flow.h" #include "cgraph.h" #include "timevar.h" #include "except.h" #include "hashtab.h" #include "flags.h" #include "real.h" #include "function.h" #include "output.h" #include "expr.h" #include "ggc.h" #include "toplev.h" #include "target.h" #include "optabs.h" #include "pointer-set.h" enum gimplify_omp_var_data { GOVD_SEEN = 1, GOVD_EXPLICIT = 2, GOVD_SHARED = 4, GOVD_PRIVATE = 8, GOVD_FIRSTPRIVATE = 16, GOVD_LASTPRIVATE = 32, GOVD_REDUCTION = 64, GOVD_LOCAL = 128, GOVD_DEBUG_PRIVATE = 256, GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LOCAL) }; struct gimplify_omp_ctx { struct gimplify_omp_ctx *outer_context; splay_tree variables; struct 
pointer_set_t *privatized_types; location_t location; enum omp_clause_default_kind default_kind; bool is_parallel; bool is_combined_parallel; }; struct gimplify_ctx { struct gimplify_ctx *prev_context; tree current_bind_expr; tree temps; tree conditional_cleanups; tree exit_label; tree return_temp; VEC(tree,heap) *case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; int conditions; bool save_stack; bool into_ssa; }; static struct gimplify_ctx *gimplify_ctxp; static struct gimplify_omp_ctx *gimplify_omp_ctxp; /* Formal (expression) temporary table handling: Multiple occurrences of the same scalar expression are evaluated into the same temporary. */ typedef struct gimple_temp_hash_elt { tree val; /* Key */ tree temp; /* Value */ } elt_t; /* Forward declarations. */ static enum gimplify_status gimplify_compound_expr (tree *, tree *, bool); #ifdef ENABLE_CHECKING static bool cpt_same_type (tree a, tree b); #endif /* Return a hash value for a formal temporary table entry. */ static hashval_t gimple_tree_hash (const void *p) { tree t = ((const elt_t *) p)->val; return iterative_hash_expr (t, 0); } /* Compare two formal temporary table entries. */ static int gimple_tree_eq (const void *p1, const void *p2) { tree t1 = ((const elt_t *) p1)->val; tree t2 = ((const elt_t *) p2)->val; enum tree_code code = TREE_CODE (t1); if (TREE_CODE (t2) != code || TREE_TYPE (t1) != TREE_TYPE (t2)) return 0; if (!operand_equal_p (t1, t2, 0)) return 0; /* Only allow them to compare equal if they also hash equal; otherwise results are nondeterminate, and we fail bootstrap comparison. */ gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2)); return 1; } /* Set up a context for the gimplifier. 
*/

void
push_gimplify_context (void)
{
  struct gimplify_ctx *c;

  c = (struct gimplify_ctx *) xcalloc (1, sizeof (struct gimplify_ctx));
  c->prev_context = gimplify_ctxp;
  /* The formal-temporary table is only allocated when optimizing,
     because temporaries are only reused in that case (see
     lookup_tmp_var).  */
  if (optimize)
    c->temp_htab = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free);

  gimplify_ctxp = c;
}

/* Tear down a context for the gimplifier.  If BODY is non-null, then
   put the temporaries into the outer BIND_EXPR.  Otherwise, put them
   in the unexpanded_var_list.  */

void
pop_gimplify_context (tree body)
{
  struct gimplify_ctx *c = gimplify_ctxp;
  tree t;

  /* There must be a context to pop, and every BIND_EXPR pushed with
     gimple_push_bind_expr must already have been popped.  */
  gcc_assert (c && !c->current_bind_expr);
  gimplify_ctxp = c->prev_context;

  /* LLVM LOCAL begin */
#ifndef ENABLE_LLVM
  /* LLVM wants to know about gimple formal temps, so the formal-temp
     flag is only cleared in the non-LLVM build.  */
  for (t = c->temps; t ; t = TREE_CHAIN (t))
    DECL_GIMPLE_FORMAL_TEMP_P (t) = 0;
#else
  t = 0;
#endif
  /* LLVM LOCAL end */

  if (body)
    declare_vars (c->temps, body, false);
  else
    record_vars (c->temps);

  if (optimize)
    htab_delete (c->temp_htab);
  free (c);
}

/* Push BIND onto the stack of enclosing BIND_EXPRs, chained through
   TREE_CHAIN.  */

static void
gimple_push_bind_expr (tree bind)
{
  TREE_CHAIN (bind) = gimplify_ctxp->current_bind_expr;
  gimplify_ctxp->current_bind_expr = bind;
}

/* Pop the innermost BIND_EXPR pushed by gimple_push_bind_expr.  */

static void
gimple_pop_bind_expr (void)
{
  gimplify_ctxp->current_bind_expr
    = TREE_CHAIN (gimplify_ctxp->current_bind_expr);
}

/* Return the innermost BIND_EXPR currently being gimplified, or
   NULL_TREE if none is active.  */

tree
gimple_current_bind_expr (void)
{
  return gimplify_ctxp->current_bind_expr;
}

/* Returns true iff there is a COND_EXPR between us and the innermost
   CLEANUP_POINT_EXPR.  This info is used by gimple_push_cleanup.  */

static bool
gimple_conditional_context (void)
{
  return gimplify_ctxp->conditions > 0;
}

/* Note that we've entered a COND_EXPR.  */

static void
gimple_push_condition (void)
{
#ifdef ENABLE_CHECKING
  /* On entry to the outermost conditional, no conditional cleanups
     should be pending yet.  */
  if (gimplify_ctxp->conditions == 0)
    gcc_assert (!gimplify_ctxp->conditional_cleanups);
#endif
  ++(gimplify_ctxp->conditions);
}

/* Note that we've left a COND_EXPR.  If we're back at unconditional
   scope now, add any conditional cleanups we've seen to the prequeue.
*/

static void
gimple_pop_condition (tree *pre_p)
{
  int conds = --(gimplify_ctxp->conditions);

  gcc_assert (conds >= 0);
  /* Leaving the outermost conditional: flush accumulated conditional
     cleanups to the caller's prequeue.  */
  if (conds == 0)
    {
      append_to_statement_list (gimplify_ctxp->conditional_cleanups, pre_p);
      gimplify_ctxp->conditional_cleanups = NULL_TREE;
    }
}

/* A stable comparison routine for use with splay trees and DECLs.
   Orders by DECL_UID so results do not depend on pointer values.  */

static int
splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
{
  tree a = (tree) xa;
  tree b = (tree) xb;

  return DECL_UID (a) - DECL_UID (b);
}

/* Create a new omp construct that deals with variable remapping.  The
   new context is linked to the enclosing one and records whether it
   represents a (possibly combined) parallel region.  */

static struct gimplify_omp_ctx *
new_omp_context (bool is_parallel, bool is_combined_parallel)
{
  struct gimplify_omp_ctx *c;

  c = XCNEW (struct gimplify_omp_ctx);
  c->outer_context = gimplify_omp_ctxp;
  c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
  c->privatized_types = pointer_set_create ();
  c->location = input_location;
  c->is_parallel = is_parallel;
  c->is_combined_parallel = is_combined_parallel;
  c->default_kind = OMP_CLAUSE_DEFAULT_SHARED;

  return c;
}

/* Destroy an omp construct that deals with variable remapping.  */

static void
delete_omp_context (struct gimplify_omp_ctx *c)
{
  splay_tree_delete (c->variables);
  pointer_set_destroy (c->privatized_types);
  XDELETE (c);
}

static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int);
static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool);

/* A subroutine of append_to_statement_list{,_force}.  T is not NULL.  */

static void
append_to_statement_list_1 (tree t, tree *list_p)
{
  tree list = *list_p;
  tree_stmt_iterator i;

  if (!list)
    {
      /* If T is already a statement list, adopt it directly rather
	 than copying its statements one by one.  */
      if (t && TREE_CODE (t) == STATEMENT_LIST)
	{
	  *list_p = t;
	  return;
	}
      *list_p = list = alloc_stmt_list ();
    }

  i = tsi_last (list);
  tsi_link_after (&i, t, TSI_CONTINUE_LINKING);
}

/* Add T to the end of the list container pointed to by LIST_P.
   If T is an expression with no effects, it is ignored.
*/

void
append_to_statement_list (tree t, tree *list_p)
{
  if (t && TREE_SIDE_EFFECTS (t))
    append_to_statement_list_1 (t, list_p);
}

/* Similar, but the statement is always added, regardless of side
   effects.  */

void
append_to_statement_list_force (tree t, tree *list_p)
{
  if (t != NULL_TREE)
    append_to_statement_list_1 (t, list_p);
}

/* Both gimplify the statement T and append it to LIST_P.  */

void
gimplify_and_add (tree t, tree *list_p)
{
  gimplify_stmt (&t);
  append_to_statement_list (t, list_p);
}

/* Strip off a legitimate source ending from the input string NAME of
   length LEN.  Rather than having to know the names used by all of
   our front ends, we strip off an ending of a period followed by
   up to five characters.  (Java uses ".class".)  */

static inline void
remove_suffix (char *name, int len)
{
  int i;

  /* Search backwards for a '.' within the last 2..7 characters.  */
  for (i = 2; i < 8 && len > i; i++)
    {
      if (name[len - i] == '.')
	{
	  name[len - i] = '\0';
	  break;
	}
    }
}

/* Create a nameless artificial label and put it in the current function
   context.  Returns the newly created label.  */

tree
create_artificial_label (void)
{
  tree lab = build_decl (LABEL_DECL, NULL_TREE, void_type_node);

  DECL_ARTIFICIAL (lab) = 1;
  DECL_IGNORED_P (lab) = 1;
  DECL_CONTEXT (lab) = current_function_decl;
  return lab;
}

/* Subroutine for find_single_pointer_decl.  DATA points to the single
   pointer DECL found so far (or NULL_TREE).  */

static tree
find_single_pointer_decl_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
			    void *data)
{
  tree *pdecl = (tree *) data;

  if (DECL_P (*tp) && POINTER_TYPE_P (TREE_TYPE (*tp)))
    {
      if (*pdecl)
	{
	  /* We already found a pointer decl; return anything other
	     than NULL_TREE to unwind from walk_tree signalling that
	     we have a duplicate.  */
	  return *tp;
	}
      *pdecl = *tp;
    }

  return NULL_TREE;
}

/* Find the single DECL of pointer type in the tree T and return it.
   If there are zero or more than one such DECLs, return NULL.
*/

static tree
find_single_pointer_decl (tree t)
{
  tree decl = NULL_TREE;

  if (walk_tree (&t, find_single_pointer_decl_1, &decl, NULL))
    {
      /* find_single_pointer_decl_1 returns a nonzero value, causing
	 walk_tree to return a nonzero value, to indicate that it
	 found more than one pointer DECL.  */
      return NULL_TREE;
    }

  return decl;
}

/* Create a new temporary name with PREFIX.  Returns an identifier.  */

static GTY(()) unsigned int tmp_var_id_num;

tree
create_tmp_var_name (const char *prefix)
{
  char *tmp_name;

  if (prefix)
    {
      /* Work on a copy; remove_suffix strips any ".xxx" source
	 ending in place.  */
      char *preftmp = ASTRDUP (prefix);

      remove_suffix (preftmp, strlen (preftmp));
      prefix = preftmp;
    }

  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++);
  return get_identifier (tmp_name);
}

/* Create a new temporary variable declaration of type TYPE.
   Does NOT push it into the current binding.  */

tree
create_tmp_var_raw (tree type, const char *prefix)
{
  tree tmp_var;
  tree new_type;

  /* Make the type of the variable writable.  */
  new_type = build_type_variant (type, 0, 0);
  TYPE_ATTRIBUTES (new_type) = TYPE_ATTRIBUTES (type);

  /* NOTE(review): the decl is built with TYPE rather than the
     writable variant NEW_TYPE computed above -- confirm this is
     intentional before changing it.  */
  tmp_var = build_decl (VAR_DECL, prefix ? create_tmp_var_name (prefix) : NULL,
			type);

  /* The variable was declared by the compiler.  */
  DECL_ARTIFICIAL (tmp_var) = 1;
  /* And we don't want debug info for it.  */
  DECL_IGNORED_P (tmp_var) = 1;

  /* Make the variable writable.  */
  TREE_READONLY (tmp_var) = 0;

  DECL_EXTERNAL (tmp_var) = 0;
  TREE_STATIC (tmp_var) = 0;
  TREE_USED (tmp_var) = 1;

  return tmp_var;
}

/* Create a new temporary variable declaration of type TYPE.  DOES push
   the variable into the current binding.  Further, assume that this is
   called only from gimplification or optimization, at which point the
   creation of certain types are bugs.  */

tree
create_tmp_var (tree type, const char *prefix)
{
  tree tmp_var;

  /* We don't allow types that are addressable (meaning we can't make
     copies), or incomplete.
We also used to reject every variable size objects here, but now support those for which a constant upper bound can be obtained. The processing for variable sizes is performed in gimple_add_tmp_var, point at which it really matters and possibly reached via paths not going through this function, e.g. after direct calls to create_tmp_var_raw. */ gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type)); tmp_var = create_tmp_var_raw (type, prefix); gimple_add_tmp_var (tmp_var); return tmp_var; } /* Given a tree, try to return a useful variable name that we can use to prefix a temporary that is being assigned the value of the tree. I.E. given <temp> = &A, return A. */ const char * get_name (tree t) { tree stripped_decl; stripped_decl = t; STRIP_NOPS (stripped_decl); if (DECL_P (stripped_decl) && DECL_NAME (stripped_decl)) return IDENTIFIER_POINTER (DECL_NAME (stripped_decl)); else { switch (TREE_CODE (stripped_decl)) { case ADDR_EXPR: return get_name (TREE_OPERAND (stripped_decl, 0)); break; default: return NULL; } } } /* Create a temporary with a name derived from VAL. Subroutine of lookup_tmp_var; nobody else should call this function. */ static inline tree create_tmp_from_val (tree val) { return create_tmp_var (TYPE_MAIN_VARIANT (TREE_TYPE (val)), get_name (val)); } /* Create a temporary to hold the value of VAL. If IS_FORMAL, try to reuse an existing expression temporary. */ static tree lookup_tmp_var (tree val, bool is_formal) { tree ret; /* If not optimizing, never really reuse a temporary. local-alloc won't allocate any variable that is used in more than one basic block, which means it will go into memory, causing much extra work in reload and final and poorer code generation, outweighing the extra memory allocation here. */ /* LLVM LOCAL begin */ #ifdef ENABLE_LLVM if (1) /* LLVM wants temporaries created in SSA form, never reuse one. 
*/ #else if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val)) #endif /* LLVM LOCAL end */ ret = create_tmp_from_val (val); else { elt_t elt, *elt_p; void **slot; elt.val = val; slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT); if (*slot == NULL) { elt_p = XNEW (elt_t); elt_p->val = val; elt_p->temp = ret = create_tmp_from_val (val); *slot = (void *) elt_p; } else { elt_p = (elt_t *) *slot; ret = elt_p->temp; } } if (is_formal) DECL_GIMPLE_FORMAL_TEMP_P (ret) = 1; return ret; } /* Returns a formal temporary variable initialized with VAL. PRE_P is as in gimplify_expr. Only use this function if: 1) The value of the unfactored expression represented by VAL will not change between the initialization and use of the temporary, and 2) The temporary will not be otherwise modified. For instance, #1 means that this is inappropriate for SAVE_EXPR temps, and #2 means it is inappropriate for && temps. For other cases, use get_initialized_tmp_var instead. */ static tree internal_get_tmp_var (tree val, tree *pre_p, tree *post_p, bool is_formal) { tree t, mod; gimplify_expr (&val, pre_p, post_p, is_gimple_formal_tmp_rhs, fb_rvalue); t = lookup_tmp_var (val, is_formal); if (is_formal) { tree u = find_single_pointer_decl (val); if (u && TREE_CODE (u) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (u)) u = DECL_GET_RESTRICT_BASE (u); if (u && TYPE_RESTRICT (TREE_TYPE (u))) { if (DECL_BASED_ON_RESTRICT_P (t)) gcc_assert (u == DECL_GET_RESTRICT_BASE (t)); else { DECL_BASED_ON_RESTRICT_P (t) = 1; SET_DECL_RESTRICT_BASE (t, u); } } } if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE) DECL_COMPLEX_GIMPLE_REG_P (t) = 1; mod = build2 (INIT_EXPR, TREE_TYPE (t), t, val); if (EXPR_HAS_LOCATION (val)) SET_EXPR_LOCUS (mod, EXPR_LOCUS (val)); else SET_EXPR_LOCATION (mod, input_location); /* gimplify_modify_expr might want to reduce this further. */ gimplify_and_add (mod, pre_p); /* If we're gimplifying into ssa, gimplify_modify_expr will have given our temporary an ssa name. 
Find and return it. */ if (gimplify_ctxp->into_ssa) t = TREE_OPERAND (mod, 0); return t; } /* Returns a formal temporary variable initialized with VAL. PRE_P points to a statement list where side-effects needed to compute VAL should be stored. */ tree get_formal_tmp_var (tree val, tree *pre_p) { return internal_get_tmp_var (val, pre_p, NULL, true); } /* Returns a temporary variable initialized with VAL. PRE_P and POST_P are as in gimplify_expr. */ tree get_initialized_tmp_var (tree val, tree *pre_p, tree *post_p) { return internal_get_tmp_var (val, pre_p, post_p, false); } /* Declares all the variables in VARS in SCOPE. If DEBUG_INFO is true, generate debug info for them; otherwise don't. */ void declare_vars (tree vars, tree scope, bool debug_info) { tree last = vars; if (last) { tree temps, block; /* C99 mode puts the default 'return 0;' for main outside the outer braces. So drill down until we find an actual scope. */ while (TREE_CODE (scope) == COMPOUND_EXPR) scope = TREE_OPERAND (scope, 0); gcc_assert (TREE_CODE (scope) == BIND_EXPR); temps = nreverse (last); block = BIND_EXPR_BLOCK (scope); if (!block || !debug_info) { TREE_CHAIN (last) = BIND_EXPR_VARS (scope); BIND_EXPR_VARS (scope) = temps; } else { /* We need to attach the nodes both to the BIND_EXPR and to its associated BLOCK for debugging purposes. The key point here is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR is a subchain of the BIND_EXPR_VARS of the BIND_EXPR. */ if (BLOCK_VARS (block)) BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps); else { BIND_EXPR_VARS (scope) = chainon (BIND_EXPR_VARS (scope), temps); BLOCK_VARS (block) = temps; } } } } /* For VAR a VAR_DECL of variable size, try to find a constant upper bound for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly. Abort if no such upper bound can be obtained. */ static void force_constant_size (tree var) { /* The only attempt we make is by querying the maximum size of objects of the variable's type. 
*/ HOST_WIDE_INT max_size; gcc_assert (TREE_CODE (var) == VAR_DECL); max_size = max_int_size_in_bytes (TREE_TYPE (var)); gcc_assert (max_size >= 0); DECL_SIZE_UNIT (var) = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size); DECL_SIZE (var) = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT); } void gimple_add_tmp_var (tree tmp) { gcc_assert (!TREE_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp)); /* Later processing assumes that the object size is constant, which might not be true at this point. Force the use of a constant upper bound in this case. */ if (!host_integerp (DECL_SIZE_UNIT (tmp), 1)) force_constant_size (tmp); DECL_CONTEXT (tmp) = current_function_decl; DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1; if (gimplify_ctxp) { TREE_CHAIN (tmp) = gimplify_ctxp->temps; gimplify_ctxp->temps = tmp; /* Mark temporaries local within the nearest enclosing parallel. */ if (gimplify_omp_ctxp) { struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp; while (ctx && !ctx->is_parallel) ctx = ctx->outer_context; if (ctx) omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN); } } else if (cfun) record_vars (tmp); else declare_vars (tmp, DECL_SAVED_TREE (current_function_decl), false); } /* Determines whether to assign a locus to the statement STMT. */ static bool should_carry_locus_p (tree stmt) { /* Don't emit a line note for a label. We particularly don't want to emit one for the break label, since it doesn't actually correspond to the beginning of the loop/switch. */ if (TREE_CODE (stmt) == LABEL_EXPR) return false; /* Do not annotate empty statements, since it confuses gcov. */ if (!TREE_SIDE_EFFECTS (stmt)) return false; return true; } static void annotate_one_with_locus (tree t, location_t locus) { if (EXPR_P (t) && ! 
EXPR_HAS_LOCATION (t) && should_carry_locus_p (t)) SET_EXPR_LOCATION (t, locus); } void annotate_all_with_locus (tree *stmt_p, location_t locus) { tree_stmt_iterator i; if (!*stmt_p) return; for (i = tsi_start (*stmt_p); !tsi_end_p (i); tsi_next (&i)) { tree t = tsi_stmt (i); /* Assuming we've already been gimplified, we shouldn't see nested chaining constructs anymore. */ gcc_assert (TREE_CODE (t) != STATEMENT_LIST && TREE_CODE (t) != COMPOUND_EXPR); annotate_one_with_locus (t, locus); } } /* Similar to copy_tree_r() but do not copy SAVE_EXPR or TARGET_EXPR nodes. These nodes model computations that should only be done once. If we were to unshare something like SAVE_EXPR(i++), the gimplification process would create wrong code. */ static tree mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data) { enum tree_code code = TREE_CODE (*tp); /* Don't unshare types, decls, constants and SAVE_EXPR nodes. */ if (TREE_CODE_CLASS (code) == tcc_type || TREE_CODE_CLASS (code) == tcc_declaration || TREE_CODE_CLASS (code) == tcc_constant || code == SAVE_EXPR || code == TARGET_EXPR /* We can't do anything sensible with a BLOCK used as an expression, but we also can't just die when we see it because of non-expression uses. So just avert our eyes and cross our fingers. Silly Java. */ || code == BLOCK) *walk_subtrees = 0; else { gcc_assert (code != BIND_EXPR); copy_tree_r (tp, walk_subtrees, data); } return NULL_TREE; } /* Callback for walk_tree to unshare most of the shared trees rooted at *TP. If *TP has been visited already (i.e., TREE_VISITED (*TP) == 1), then *TP is deep copied by calling copy_tree_r. This unshares the same trees as copy_tree_r with the exception of SAVE_EXPR nodes. These nodes model computations that should only be done once. If we were to unshare something like SAVE_EXPR(i++), the gimplification process would create wrong code. 
*/ static tree copy_if_shared_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { tree t = *tp; enum tree_code code = TREE_CODE (t); /* Skip types, decls, and constants. But we do want to look at their types and the bounds of types. Mark them as visited so we properly unmark their subtrees on the unmark pass. If we've already seen them, don't look down further. */ if (TREE_CODE_CLASS (code) == tcc_type || TREE_CODE_CLASS (code) == tcc_declaration || TREE_CODE_CLASS (code) == tcc_constant) { if (TREE_VISITED (t)) *walk_subtrees = 0; else TREE_VISITED (t) = 1; } /* If this node has been visited already, unshare it and don't look any deeper. */ else if (TREE_VISITED (t)) { walk_tree (tp, mostly_copy_tree_r, NULL, NULL); *walk_subtrees = 0; } /* Otherwise, mark the tree as visited and keep looking. */ else TREE_VISITED (t) = 1; return NULL_TREE; } static tree unmark_visited_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { if (TREE_VISITED (*tp)) TREE_VISITED (*tp) = 0; else *walk_subtrees = 0; return NULL_TREE; } /* Unshare all the trees in BODY_P, a pointer into the body of FNDECL, and the bodies of any nested functions if we are unsharing the entire body of FNDECL. */ static void unshare_body (tree *body_p, tree fndecl) { struct cgraph_node *cgn = cgraph_node (fndecl); walk_tree (body_p, copy_if_shared_r, NULL, NULL); if (body_p == &DECL_SAVED_TREE (fndecl)) for (cgn = cgn->nested; cgn; cgn = cgn->next_nested) unshare_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl); } /* Likewise, but mark all trees as not visited. */ static void unvisit_body (tree *body_p, tree fndecl) { struct cgraph_node *cgn = cgraph_node (fndecl); walk_tree (body_p, unmark_visited_r, NULL, NULL); if (body_p == &DECL_SAVED_TREE (fndecl)) for (cgn = cgn->nested; cgn; cgn = cgn->next_nested) unvisit_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl); } /* Unshare T and all the trees reached from T via TREE_CHAIN. 
*/ static void unshare_all_trees (tree t) { walk_tree (&t, copy_if_shared_r, NULL, NULL); walk_tree (&t, unmark_visited_r, NULL, NULL); } /* Unconditionally make an unshared copy of EXPR. This is used when using stored expressions which span multiple functions, such as BINFO_VTABLE, as the normal unsharing process can't tell that they're shared. */ tree unshare_expr (tree expr) { walk_tree (&expr, mostly_copy_tree_r, NULL, NULL); return expr; } /* A terser interface for building a representation of an exception specification. */ tree gimple_build_eh_filter (tree body, tree allowed, tree failure) { tree t; /* FIXME should the allowed types go in TREE_TYPE? */ t = build2 (EH_FILTER_EXPR, void_type_node, allowed, NULL_TREE); append_to_statement_list (failure, &EH_FILTER_FAILURE (t)); t = build2 (TRY_CATCH_EXPR, void_type_node, NULL_TREE, t); append_to_statement_list (body, &TREE_OPERAND (t, 0)); return t; } /* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both contain statements and have a value. Assign its value to a temporary and give it void_type_node. Returns the temporary, or NULL_TREE if WRAPPER was already void. */ tree voidify_wrapper_expr (tree wrapper, tree temp) { tree type = TREE_TYPE (wrapper); if (type && !VOID_TYPE_P (type)) { tree *p; /* Set p to point to the body of the wrapper. Loop until we find something that isn't a wrapper. */ for (p = &wrapper; p && *p; ) { switch (TREE_CODE (*p)) { case BIND_EXPR: TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; /* For a BIND_EXPR, the body is operand 1. */ p = &BIND_EXPR_BODY (*p); break; case CLEANUP_POINT_EXPR: case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; p = &TREE_OPERAND (*p, 0); break; case STATEMENT_LIST: { tree_stmt_iterator i = tsi_last (*p); TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i); } break; case COMPOUND_EXPR: /* Advance to the last statement. 
Set all container types to void. */ for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1)) { TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; } break; default: goto out; } } out: if (p == NULL || IS_EMPTY_STMT (*p)) temp = NULL_TREE; else if (temp) { /* The wrapper is on the RHS of an assignment that we're pushing down. */ gcc_assert (TREE_CODE (temp) == INIT_EXPR || TREE_CODE (temp) == MODIFY_EXPR); TREE_OPERAND (temp, 1) = *p; *p = temp; } else { temp = create_tmp_var (type, "retval"); *p = build2 (INIT_EXPR, type, temp, *p); } return temp; } return NULL_TREE; } /* Prepare calls to builtins to SAVE and RESTORE the stack as well as a temporary through which they communicate. */ static void build_stack_save_restore (tree *save, tree *restore) { tree save_call, tmp_var; save_call = build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_SAVE], NULL_TREE); tmp_var = create_tmp_var (ptr_type_node, "saved_stack"); *save = build2 (MODIFY_EXPR, ptr_type_node, tmp_var, save_call); *restore = build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_RESTORE], tree_cons (NULL_TREE, tmp_var, NULL_TREE)); } /* Gimplify a BIND_EXPR. Just voidify and recurse. */ static enum gimplify_status gimplify_bind_expr (tree *expr_p, tree *pre_p) { tree bind_expr = *expr_p; bool old_save_stack = gimplify_ctxp->save_stack; tree t; tree temp = voidify_wrapper_expr (bind_expr, NULL); /* Mark variables seen in this bind expr. */ for (t = BIND_EXPR_VARS (bind_expr); t ; t = TREE_CHAIN (t)) { if (TREE_CODE (t) == VAR_DECL) { struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp; /* Mark variable as local. */ if (ctx && !is_global_var (t) && (! DECL_SEEN_IN_BIND_EXPR_P (t) || splay_tree_lookup (ctx->variables, (splay_tree_key) t) == NULL)) omp_add_variable (gimplify_omp_ctxp, t, GOVD_LOCAL | GOVD_SEEN); DECL_SEEN_IN_BIND_EXPR_P (t) = 1; } /* Preliminarily mark non-addressed complex variables as eligible for promotion to gimple registers. 
We'll transform their uses as we find them. */ if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE && !TREE_THIS_VOLATILE (t) && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t)) && !needs_to_live_in_memory (t)) DECL_COMPLEX_GIMPLE_REG_P (t) = 1; } gimple_push_bind_expr (bind_expr); gimplify_ctxp->save_stack = false; gimplify_to_stmt_list (&BIND_EXPR_BODY (bind_expr)); if (gimplify_ctxp->save_stack) { tree stack_save, stack_restore; /* Save stack on entry and restore it on exit. Add a try_finally block to achieve this. Note that mudflap depends on the format of the emitted code: see mx_register_decls(). */ build_stack_save_restore (&stack_save, &stack_restore); t = build2 (TRY_FINALLY_EXPR, void_type_node, BIND_EXPR_BODY (bind_expr), NULL_TREE); append_to_statement_list (stack_restore, &TREE_OPERAND (t, 1)); BIND_EXPR_BODY (bind_expr) = NULL_TREE; append_to_statement_list (stack_save, &BIND_EXPR_BODY (bind_expr)); append_to_statement_list (t, &BIND_EXPR_BODY (bind_expr)); } gimplify_ctxp->save_stack = old_save_stack; gimple_pop_bind_expr (); if (temp) { *expr_p = temp; append_to_statement_list (bind_expr, pre_p); return GS_OK; } else return GS_ALL_DONE; } /* Gimplify a RETURN_EXPR. If the expression to be returned is not a GIMPLE value, it is assigned to a new temporary and the statement is re-written to return the temporary. PRE_P points to the list where side effects that must happen before STMT should be stored. */ static enum gimplify_status gimplify_return_expr (tree stmt, tree *pre_p) { tree ret_expr = TREE_OPERAND (stmt, 0); tree result_decl, result; if (!ret_expr || TREE_CODE (ret_expr) == RESULT_DECL /* APPLE LOCAL radar 6261552 */ /* code to check for cur_block is removed. */ || ret_expr == error_mark_node) return GS_ALL_DONE; if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl)))) result_decl = NULL_TREE; else { result_decl = TREE_OPERAND (ret_expr, 0); if (TREE_CODE (result_decl) == INDIRECT_REF) /* See through a return by reference. 
*/ result_decl = TREE_OPERAND (result_decl, 0); gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR || TREE_CODE (ret_expr) == INIT_EXPR) && TREE_CODE (result_decl) == RESULT_DECL); } /* If aggregate_value_p is true, then we can return the bare RESULT_DECL. Recall that aggregate_value_p is FALSE for any aggregate type that is returned in registers. If we're returning values in registers, then we don't want to extend the lifetime of the RESULT_DECL, particularly across another call. In addition, for those aggregates for which hard_function_value generates a PARALLEL, we'll die during normal expansion of structure assignments; there's special code in expand_return to handle this case that does not exist in expand_expr. */ if (!result_decl || aggregate_value_p (result_decl, TREE_TYPE (current_function_decl))) result = result_decl; else if (gimplify_ctxp->return_temp) result = gimplify_ctxp->return_temp; else { result = create_tmp_var (TREE_TYPE (result_decl), NULL); /* ??? With complex control flow (usually involving abnormal edges), we can wind up warning about an uninitialized value for this. Due to how this variable is constructed and initialized, this is never true. Give up and never warn. */ TREE_NO_WARNING (result) = 1; gimplify_ctxp->return_temp = result; } /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use. Then gimplify the whole thing. */ if (result != result_decl) TREE_OPERAND (ret_expr, 0) = result; gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p); /* If we didn't use a temporary, then the result is just the result_decl. Otherwise we need a simple copy. This should already be gimple. */ if (result == result_decl) ret_expr = result; else ret_expr = build2 (MODIFY_EXPR, TREE_TYPE (result), result_decl, result); TREE_OPERAND (stmt, 0) = ret_expr; return GS_ALL_DONE; } /* Gimplifies a DECL_EXPR node *STMT_P by making any necessary allocation and initialization explicit. 
*/

static enum gimplify_status
gimplify_decl_expr (tree *stmt_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  /* The DECL_EXPR itself is consumed; any statements we need are
     appended to *STMT_P below.  */
  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  if ((TREE_CODE (decl) == TYPE_DECL
       || TREE_CODE (decl) == VAR_DECL)
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    gimplify_type_sizes (TREE_TYPE (decl), stmt_p);

  if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);

      if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  /* This is a variable-sized decl.  Simplify its size and mark it
	     for deferred expansion.  Note that mudflap depends on the format
	     of the emitted code: see mx_register_decls().  */
	  tree t, args, addr, ptr_type;

	  gimplify_one_sizepos (&DECL_SIZE (decl), stmt_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), stmt_p);

	  /* All occurrences of this decl in final gimplified code will be
	     replaced by indirection.  Setting DECL_VALUE_EXPR does two
	     things: First, it lets the rest of the gimplifier know what
	     replacement to use.  Second, it lets the debug info know
	     where to find the value.  */
	  ptr_type = build_pointer_type (TREE_TYPE (decl));
	  addr = create_tmp_var (ptr_type, get_name (decl));
	  /* Keep the pointer temporary visible so debug info can locate
	     the variable through the value expression.  */
	  DECL_IGNORED_P (addr) = 0;
	  t = build_fold_indirect_ref (addr);
	  SET_DECL_VALUE_EXPR (decl, t);
	  DECL_HAS_VALUE_EXPR_P (decl) = 1;

	  /* Allocate the storage dynamically: addr = alloca (size).  */
	  args = tree_cons (NULL, DECL_SIZE_UNIT (decl), NULL);
	  t = built_in_decls[BUILT_IN_ALLOCA];
	  /* LLVM LOCAL begin add alloca alignment */
	  /* We may have specified an alignment on the alloca - store it on
	     the function call so that we can emit this later and not lose
	     it.  */
	  DECL_USER_ALIGN (t) = DECL_USER_ALIGN (decl);
	  DECL_ALIGN(t) = DECL_ALIGN(decl);
	  /* LLVM LOCAL end add alloca alignment */
	  t = build_function_call_expr (t, args);
	  t = fold_convert (ptr_type, t);
	  t = build2 (MODIFY_EXPR, void_type_node, addr, t);

	  gimplify_and_add (t, stmt_p);

	  /* Indicate that we need to restore the stack level when the
	     enclosing BIND_EXPR is exited.  */
	  gimplify_ctxp->save_stack = true;
	}

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      /* Automatic variable: turn the initializer into an explicit
		 INIT_EXPR statement and gimplify it in place.  */
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, stmt_p);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.  */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);
    }

  return GS_ALL_DONE;
}

/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */

static enum gimplify_status
gimplify_loop_expr (tree *expr_p, tree *pre_p)
{
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
  tree jump_stmt = build_and_jump (&LABEL_EXPR_LABEL (start_label));

  append_to_statement_list (start_label, pre_p);

  /* Clear exit_label so that an EXIT_EXPR inside the body creates a
     fresh label for this loop; the old value is restored below so
     nested loops work.  */
  gimplify_ctxp->exit_label = NULL_TREE;

  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);

  if (gimplify_ctxp->exit_label)
    {
      /* The body contained an EXIT_EXPR: emit the back-edge jump and
	 then the exit label it branches to.  */
      append_to_statement_list (jump_stmt, pre_p);
      *expr_p = build1 (LABEL_EXPR, void_type_node, gimplify_ctxp->exit_label);
    }
  else
    /* No exit: the loop ends with an unconditional jump back to the
       start label.  */
    *expr_p = jump_stmt;

  gimplify_ctxp->exit_label = saved_label;

  return GS_ALL_DONE;
}

/* Compare two case labels.  Because the front end should already have
   made sure that case ranges do not overlap, it is enough to only compare
   the CASE_LOW values of each case label.
*/

/* qsort callback for sort_case_labels: orders CASE_LABEL_EXPRs by their
   CASE_LOW constants.  */

static int
compare_case_labels (const void *p1, const void *p2)
{
  tree case1 = *(tree *)p1;
  tree case2 = *(tree *)p2;

  return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
}

/* Sort the case labels in LABEL_VEC in place in ascending order.  */

void
sort_case_labels (tree label_vec)
{
  size_t len = TREE_VEC_LENGTH (label_vec);
  tree default_case = TREE_VEC_ELT (label_vec, len - 1);

  if (CASE_LOW (default_case))
    {
      size_t i;

      /* The last label in the vector should be the default case
	 but it is not.  Find the default case (the one with no
	 CASE_LOW) and swap it to the end.  */
      for (i = 0; i < len; ++i)
	{
	  tree t = TREE_VEC_ELT (label_vec, i);

	  if (!CASE_LOW (t))
	    {
	      default_case = t;
	      TREE_VEC_ELT (label_vec, i) = TREE_VEC_ELT (label_vec, len - 1);
	      TREE_VEC_ELT (label_vec, len - 1) = default_case;
	      break;
	    }
	}
    }

  /* Only the LEN - 1 non-default labels are sorted; the default stays
     in the final slot.  */
  qsort (&TREE_VEC_ELT (label_vec, 0), len - 1, sizeof (tree),
	 compare_case_labels);
}

/* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can
   branch to.  */

static enum gimplify_status
gimplify_switch_expr (tree *expr_p, tree *pre_p)
{
  tree switch_expr = *expr_p;
  enum gimplify_status ret;

  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL,
		       is_gimple_val, fb_rvalue);

  if (SWITCH_BODY (switch_expr))
    {
      VEC(tree,heap) *labels, *saved_labels;
      tree label_vec, default_case = NULL_TREE;
      size_t i, len;

      /* If someone can be bothered to fill in the labels, they can
	 be bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));

      /* Collect the CASE_LABEL_EXPRs of this switch in a fresh vector,
	 saving any outer switch's collection around the body walk.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);

      gimplify_to_stmt_list (&SWITCH_BODY (switch_expr));

      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      /* Filter out empty ranges and pull out the default case.  */
      i = 0;
      while (i < VEC_length (tree, labels))
	{
	  tree elt = VEC_index (tree, labels, i);
	  tree low = CASE_LOW (elt);
	  bool remove_element = FALSE;

	  if (low)
	    {
	      /* Discard empty ranges.  */
	      tree high = CASE_HIGH (elt);

	      if (high && INT_CST_LT (high, low))
		remove_element = TRUE;
	    }
	  else
	    {
	      /* The default case must be the last label in the list.  */
	      gcc_assert (!default_case);
	      default_case = elt;
	      remove_element = TRUE;
	    }

	  if (remove_element)
	    VEC_ordered_remove (tree, labels, i);
	  else
	    i++;
	}
      len = i;

      /* The extra slot holds the default case.  */
      label_vec = make_tree_vec (len + 1);
      SWITCH_LABELS (*expr_p) = label_vec;
      append_to_statement_list (switch_expr, pre_p);

      if (! default_case)
	{
	  /* If the switch has no default label, add one, so that we jump
	     around the switch body.  */
	  default_case = build3 (CASE_LABEL_EXPR, void_type_node, NULL_TREE,
				 NULL_TREE, create_artificial_label ());
	  append_to_statement_list (SWITCH_BODY (switch_expr), pre_p);
	  *expr_p = build1 (LABEL_EXPR, void_type_node,
			    CASE_LABEL (default_case));
	}
      else
	*expr_p = SWITCH_BODY (switch_expr);

      for (i = 0; i < len; ++i)
	TREE_VEC_ELT (label_vec, i) = VEC_index (tree, labels, i);
      TREE_VEC_ELT (label_vec, len) = default_case;

      VEC_free (tree, heap, labels);

      sort_case_labels (label_vec);

      SWITCH_BODY (switch_expr) = NULL;
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));

  return ret;
}

/* Gimplify a CASE_LABEL_EXPR: record it in the innermost context that
   is collecting case labels and lower it to a plain LABEL_EXPR.  */

static enum gimplify_status
gimplify_case_label_expr (tree *expr_p)
{
  tree expr = *expr_p;
  struct gimplify_ctx *ctxp;

  /* Invalid OpenMP programs can play Duff's Device type games with
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification.  */
  for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
    if (ctxp->case_labels)
      break;

  VEC_safe_push (tree, heap, ctxp->case_labels, expr);
  *expr_p = build1 (LABEL_EXPR, void_type_node, CASE_LABEL (expr));
  return GS_ALL_DONE;
}

/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
   if necessary.  */

tree
build_and_jump (tree *label_p)
{
  if (label_p == NULL)
    /* If there's nowhere to jump, just fall through.  */
    return NULL_TREE;

  if (*label_p == NULL_TREE)
    {
      tree label = create_artificial_label ();

      *label_p = label;
    }

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}

/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
   This also involves building a label to jump to and communicating it to
   gimplify_loop_expr through gimplify_ctxp->exit_label.  */

static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree cond = TREE_OPERAND (*expr_p, 0);
  tree expr;

  /* Lower to "if (cond) goto exit_label;", creating the enclosing
     loop's exit label on demand.  */
  expr = build_and_jump (&gimplify_ctxp->exit_label);
  expr = build3 (COND_EXPR, void_type_node, cond, expr, NULL_TREE);
  *expr_p = expr;

  return GS_OK;
}

/* A helper function to be called via walk_tree.  Mark all labels under *TP
   as being forced.  To be called for DECL_INITIAL of static variables.  */

tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (TREE_CODE (*tp) == LABEL_DECL)
    FORCED_LABEL (*tp) = 1;

  return NULL_TREE;
}

/* *EXPR_P is a COMPONENT_REF being used as an rvalue.  If its type is
   different from its canonical type, wrap the whole thing inside a
   NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
   type.

   The canonical type of a COMPONENT_REF is the type of the field being
   referenced--unless the field is a bit-field which can be read directly
   in a smaller mode, in which case the canonical type is the
   sign-appropriate type corresponding to that mode.  */

static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;

  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);

  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));

  if (TREE_TYPE (expr) != type)
    {
      tree old_type = TREE_TYPE (expr);

      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;

      /* And wrap the whole thing inside a NOP_EXPR.  */
      expr = build1 (NOP_EXPR, old_type, expr);

      *expr_p = expr;
    }
}

/* If a NOP conversion is changing a pointer to array of foo to a pointer
   to foo, embed that change in the ADDR_EXPR by converting
      T array[U];
      (T *)&array
   ==>
      &array[L]
   where L is the lower bound.  For simplicity, only do this for constant
   lower bound.  */

static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree ctype = TREE_TYPE (expr);
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree atype = TREE_TYPE (addr_expr);
  tree dctype, datype, ddatype, otype, obj_expr;

  /* Both cast and addr_expr types should be pointers.  */
  if (!POINTER_TYPE_P (ctype) || !POINTER_TYPE_P (atype))
    return;

  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (atype);
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;

  /* Both cast and addr_expr types should address the same object type.  */
  dctype = TREE_TYPE (ctype);
  ddatype = TREE_TYPE (datype);
  if (!lang_hooks.types_compatible_p (ddatype, dctype))
    return;

  /* The addr_expr and the object type should match.  */
  obj_expr = TREE_OPERAND (addr_expr, 0);
  otype = TREE_TYPE (obj_expr);
  if (!lang_hooks.types_compatible_p (otype, datype))
    return;

  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (dctype)
      || TREE_CODE (TYPE_SIZE_UNIT (dctype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;

  /* All checks succeeded.  Build a new node to merge the cast.  The
     fourth operand is the element size divided by the element type's
     alignment, the form the gimplifier stores in ARRAY_REFs.  */
  *expr_p = build4 (ARRAY_REF, dctype, obj_expr,
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (dctype),
				size_int (TYPE_ALIGN_UNIT (dctype))));
  *expr_p = build1 (ADDR_EXPR, ctype, *expr_p);
}

/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other conversions
   underneath as appropriate.  */

static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  gcc_assert (TREE_CODE (*expr_p) == NOP_EXPR
	      || TREE_CODE (*expr_p) == CONVERT_EXPR);

  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));

  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);

  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (TREE_CODE (*expr_p) == NOP_EXPR || TREE_CODE (*expr_p) == CONVERT_EXPR)
    {
      tree sub = TREE_OPERAND (*expr_p, 0);

      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));

      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }

  return GS_OK;
}

/* Gimplify a VAR_DECL or PARM_DECL.  Returns GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  */

static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;

  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (errorcount || sorrycount);
      return GS_ERROR;
    }

  /* When within an OpenMP context, notice uses of variables.  */
  if (gimplify_omp_ctxp && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;

  /* If the decl is an alias for another expression, substitute it now.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      *expr_p = unshare_expr (DECL_VALUE_EXPR (decl));
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node pointed to by EXPR_P.

      compound_lval
	      : min_lval '[' val ']'
	      | min_lval '.' ID
	      | compound_lval '[' val ']'
	      | compound_lval '.' ID

   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.  Also, this way we don't run into problems with union
   aliasing; gcc requires that for accesses through a union to alias, the
   union reference must be explicit, which was not always the case when we
   were splitting up array and member refs.

   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
     *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_compound_lval (tree *expr_p, tree *pre_p,
			tree *post_p, fallback_t fallback)
{
  tree *p;
  VEC(tree,heap) *stack;
  enum gimplify_status ret = GS_OK, tret;
  int i;

  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  stack = VEC_alloc (tree, heap, 10);

  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref (*p);

      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs.  */
      else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;

      VEC_safe_push (tree, heap, stack, *p);
    }

  gcc_assert (VEC_length (tree, stack));

  /* Now STACK is a stack of pointers to all the refs we've walked through
     and P points to the innermost expression.

     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.

     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = VEC_length (tree, stack) - 1; i >= 0; i--)
    {
      tree t = VEC_index (tree, stack, i);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound and element type size and put them into
	     the ARRAY_REF.  If these values are set, they have already been
	     gimplified.  */
	  /* LLVM LOCAL begin */
	  /* Handle the LLVM extension that allows: (ARRAY_REF ptr, idx) */
	  if (!TREE_OPERAND (t, 2)
#ifdef ENABLE_LLVM
	      && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == ARRAY_TYPE
#endif
	      )
	  /* LLVM LOCAL end */
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));

	      if (!is_gimple_min_invariant (low))
		{
		  TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }

	  /* LLVM LOCAL begin */
	  /* Handle the LLVM extension that allows: (ARRAY_REF ptr, idx) */
	  if (!TREE_OPERAND (t, 3)
#ifdef ENABLE_LLVM
	      && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == ARRAY_TYPE
#endif
	      )
	  /* LLVM LOCAL end */
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
	      tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));

	      /* Divide the element size by the alignment of the element
		 type (above).  */
	      elmt_size = size_binop (EXACT_DIV_EXPR, elmt_size, factor);

	      if (!is_gimple_min_invariant (elmt_size))
		{
		  TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
	      tree factor = size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);

	      /* Divide the offset by its alignment.  */
	      offset = size_binop (EXACT_DIV_EXPR, offset, factor);

	      if (!is_gimple_min_invariant (offset))
		{
		  TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
    }

  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);

  /* And finally, the indices and operands to BIT_FIELD_REF.  During this
     loop we also remove any useless conversions.  */
  for (; VEC_length (tree, stack) > 0; )
    {
      tree t = VEC_pop (tree, stack);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.
	     Temporary fix for gcc.c-torture/execute/20040313-1.c.
	     Gimplify non-constant array indices into a temporary
	     variable.
	     FIXME - The real fix is to gimplify post-modify
	     expressions into a minimal gimple lvalue.  However, that
	     exposes bugs in alias analysis.  The alias analyzer does
	     not handle &PTR->FIELD very well.  Will fix after the
	     branch is merged into mainline (dnovillo 2004-05-03).  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_formal_tmp_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == BIT_FIELD_REF)
	{
	  tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	}

      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));

      /* The innermost expression P may have originally had TREE_SIDE_EFFECTS
	 set which would have caused all the outer expressions in EXPR_P
	 leading to P to also have had TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }

  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback);
  ret = MIN (ret, tret);

  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
      ret = MIN (ret, GS_OK);
    }

  VEC_free (tree, heap, stack);

  return ret;
}

/* Gimplify the self modifying expression pointed to by EXPR_P
   (++, --, +=, -=).

   PRE_P points to the list where side effects that must happen before
	 *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
	 *EXPR_P should be stored.

   WANT_VALUE is nonzero iff we want to use the value of this expression
	 in another expression.  */

static enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p,
			bool want_value)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1, post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;

  code = TREE_CODE (*expr_p);

  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
	      || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);

  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;

  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;

  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;

  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);

  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  */
  if (postfix)
    {
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs);
  t1 = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);

  if (postfix)
    {
      /* Postfix: the store goes in the post queue and the expression's
	 value is the pre-modification rvalue.  */
      gimplify_and_add (t1, orig_post_p);
      append_to_statement_list (post, orig_post_p);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = t1;
      return GS_OK;
    }
}

/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR.  */

static void
maybe_with_size_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  tree size;

  /* If we've already wrapped this or the type is error_mark_node, we can't do
     anything.  */
  if (TREE_CODE (expr) == WITH_SIZE_EXPR
      || type == error_mark_node)
    return;

  /* If the size isn't known or is a constant, we have nothing to do.  */
  size = TYPE_SIZE_UNIT (type);
  if (!size || TREE_CODE (size) == INTEGER_CST)
    return;

  /* Otherwise, make a WITH_SIZE_EXPR.  */
  size = unshare_expr (size);
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}

/* Subroutine of gimplify_call_expr:  Gimplify a single argument.
*/ static enum gimplify_status gimplify_arg (tree *expr_p, tree *pre_p) { bool (*test) (tree); fallback_t fb; /* In general, we allow lvalues for function arguments to avoid extra overhead of copying large aggregates out of even larger aggregates into temporaries only to copy the temporaries to the argument list. Make optimizers happy by pulling out to temporaries those types that fit in registers. */ if (is_gimple_reg_type (TREE_TYPE (*expr_p))) test = is_gimple_val, fb = fb_rvalue; else test = is_gimple_lvalue, fb = fb_either; /* If this is a variable sized type, we must remember the size. */ maybe_with_size_expr (expr_p); /* There is a sequence point before a function call. Side effects in the argument list must occur before the actual call. So, when gimplifying arguments, force gimplify_expr to use an internal post queue which is then appended to the end of PRE_P. */ return gimplify_expr (expr_p, pre_p, NULL, test, fb); } /* Gimplify the CALL_EXPR node pointed to by EXPR_P. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. WANT_VALUE is true if the result of the call is desired. */ static enum gimplify_status gimplify_call_expr (tree *expr_p, tree *pre_p, bool want_value) { tree decl; tree arglist; enum gimplify_status ret; gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR); /* For reliable diagnostics during inlining, it is necessary that every call_expr be annotated with file and line. */ if (! EXPR_HAS_LOCATION (*expr_p)) SET_EXPR_LOCATION (*expr_p, input_location); /* This may be a call to a builtin function. Builtin function calls may be transformed into different (and more efficient) builtin function calls under certain circumstances. Unfortunately, gimplification can muck things up enough that the builtin expanders are not aware that certain transformations are still valid. So we attempt transformation/gimplification of the call before we gimplify the CALL_EXPR. 
At this time we do not manage to transform all calls in the same manner as the expanders do, but we do transform most of them. */ decl = get_callee_fndecl (*expr_p); if (decl && DECL_BUILT_IN (decl)) { tree arglist = TREE_OPERAND (*expr_p, 1); tree new = fold_builtin (decl, arglist, !want_value); if (new && new != *expr_p) { /* There was a transformation of this call which computes the same value, but in a more efficient way. Return and try again. */ *expr_p = new; return GS_OK; } if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (decl) == BUILT_IN_VA_START) { if (!arglist || !TREE_CHAIN (arglist)) { error ("too few arguments to function %<va_start%>"); *expr_p = build_empty_stmt (); return GS_OK; } if (fold_builtin_next_arg (TREE_CHAIN (arglist))) { *expr_p = build_empty_stmt (); return GS_OK; } /* Avoid gimplifying the second argument to va_start, which needs to be the plain PARM_DECL. */ return gimplify_arg (&TREE_VALUE (TREE_OPERAND (*expr_p, 1)), pre_p); } } /* There is a sequence point before the call, so any side effects in the calling expression must occur before the actual call. Force gimplify_expr to use an internal post queue. */ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, NULL, is_gimple_call_addr, fb_rvalue); if (PUSH_ARGS_REVERSED) TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1)); for (arglist = TREE_OPERAND (*expr_p, 1); arglist; arglist = TREE_CHAIN (arglist)) { enum gimplify_status t; t = gimplify_arg (&TREE_VALUE (arglist), pre_p); if (t == GS_ERROR) ret = GS_ERROR; } if (PUSH_ARGS_REVERSED) TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1)); /* Try this again in case gimplification exposed something. 
*/ if (ret != GS_ERROR) { decl = get_callee_fndecl (*expr_p); if (decl && DECL_BUILT_IN (decl)) { tree arglist = TREE_OPERAND (*expr_p, 1); tree new = fold_builtin (decl, arglist, !want_value); if (new && new != *expr_p) { /* There was a transformation of this call which computes the same value, but in a more efficient way. Return and try again. */ *expr_p = new; return GS_OK; } } } /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its decl. This allows us to eliminate redundant or useless calls to "const" functions. */ if (TREE_CODE (*expr_p) == CALL_EXPR && (call_expr_flags (*expr_p) & (ECF_CONST | ECF_PURE))) TREE_SIDE_EFFECTS (*expr_p) = 0; return ret; } /* Handle shortcut semantics in the predicate operand of a COND_EXPR by rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs. TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the condition is true or false, respectively. If null, we should generate our own to skip over the evaluation of this specific expression. This function is the tree equivalent of do_jump. shortcut_cond_r should only be called by shortcut_cond_expr. */ static tree shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p) { tree local_label = NULL_TREE; tree t, expr = NULL; /* OK, it's not a simple case; we need to pull apart the COND_EXPR to retain the shortcut semantics. Just insert the gotos here; shortcut_cond_expr will append the real blocks later. 
*/ if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR) { /* Turn if (a && b) into if (a); else goto no; if (b) goto yes; else goto no; (no:) */ if (false_label_p == NULL) false_label_p = &local_label; t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p); append_to_statement_list (t, &expr); t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p); append_to_statement_list (t, &expr); } else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR) { /* Turn if (a || b) into if (a) goto yes; if (b) goto yes; else goto no; (yes:) */ if (true_label_p == NULL) true_label_p = &local_label; t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL); append_to_statement_list (t, &expr); t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p); append_to_statement_list (t, &expr); } else if (TREE_CODE (pred) == COND_EXPR) { /* As long as we're messing with gotos, turn if (a ? b : c) into if (a) if (b) goto yes; else goto no; else if (c) goto yes; else goto no; */ expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0), shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p), shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p, false_label_p)); } else { expr = build3 (COND_EXPR, void_type_node, pred, build_and_jump (true_label_p), build_and_jump (false_label_p)); } if (local_label) { t = build1 (LABEL_EXPR, void_type_node, local_label); append_to_statement_list (t, &expr); } return expr; } static tree shortcut_cond_expr (tree expr) { tree pred = TREE_OPERAND (expr, 0); tree then_ = TREE_OPERAND (expr, 1); tree else_ = TREE_OPERAND (expr, 2); tree true_label, false_label, end_label, t; tree *true_label_p; tree *false_label_p; bool emit_end, emit_false, jump_over_else; bool then_se = then_ && TREE_SIDE_EFFECTS (then_); bool else_se = else_ && TREE_SIDE_EFFECTS (else_); /* First do simple transformations. */ if (!else_se) { /* If there is no 'else', turn (a && b) into if (a) if (b). 
*/ while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR) { TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1); then_ = shortcut_cond_expr (expr); then_se = then_ && TREE_SIDE_EFFECTS (then_); pred = TREE_OPERAND (pred, 0); expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE); } } if (!then_se) { /* If there is no 'then', turn if (a || b); else d into if (a); else if (b); else d. */ while (TREE_CODE (pred) == TRUTH_ORIF_EXPR) { TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1); else_ = shortcut_cond_expr (expr); else_se = else_ && TREE_SIDE_EFFECTS (else_); pred = TREE_OPERAND (pred, 0); expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_); } } /* If we're done, great. */ if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR && TREE_CODE (pred) != TRUTH_ORIF_EXPR) return expr; /* Otherwise we need to mess with gotos. Change if (a) c; else d; to if (a); else goto no; c; goto end; no: d; end: and recursively gimplify the condition. */ true_label = false_label = end_label = NULL_TREE; /* If our arms just jump somewhere, hijack those labels so we don't generate jumps to jumps. */ if (then_ && TREE_CODE (then_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL) { true_label = GOTO_DESTINATION (then_); then_ = NULL; then_se = false; } if (else_ && TREE_CODE (else_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL) { false_label = GOTO_DESTINATION (else_); else_ = NULL; else_se = false; } /* If we aren't hijacking a label for the 'then' branch, it falls through. */ if (true_label) true_label_p = &true_label; else true_label_p = NULL; /* The 'else' branch also needs a label if it contains interesting code. */ if (false_label || else_se) false_label_p = &false_label; else false_label_p = NULL; /* If there was nothing else in our arms, just forward the label(s). */ if (!then_se && !else_se) return shortcut_cond_r (pred, true_label_p, false_label_p); /* If our last subexpression already has a terminal label, reuse it. 
*/ if (else_se) expr = expr_last (else_); else if (then_se) expr = expr_last (then_); else expr = NULL; if (expr && TREE_CODE (expr) == LABEL_EXPR) end_label = LABEL_EXPR_LABEL (expr); /* If we don't care about jumping to the 'else' branch, jump to the end if the condition is false. */ if (!false_label_p) false_label_p = &end_label; /* We only want to emit these labels if we aren't hijacking them. */ emit_end = (end_label == NULL_TREE); emit_false = (false_label == NULL_TREE); /* We only emit the jump over the else clause if we have to--if the then clause may fall through. Otherwise we can wind up with a useless jump and a useless label at the end of gimplified code, which will cause us to think that this conditional as a whole falls through even if it doesn't. If we then inline a function which ends with such a condition, that can cause us to issue an inappropriate warning about control reaching the end of a non-void function. */ jump_over_else = block_may_fallthru (then_); pred = shortcut_cond_r (pred, true_label_p, false_label_p); expr = NULL; append_to_statement_list (pred, &expr); append_to_statement_list (then_, &expr); if (else_se) { if (jump_over_else) { t = build_and_jump (&end_label); append_to_statement_list (t, &expr); } if (emit_false) { t = build1 (LABEL_EXPR, void_type_node, false_label); append_to_statement_list (t, &expr); } append_to_statement_list (else_, &expr); } if (emit_end && end_label) { t = build1 (LABEL_EXPR, void_type_node, end_label); append_to_statement_list (t, &expr); } return expr; } /* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE. */ tree gimple_boolify (tree expr) { tree type = TREE_TYPE (expr); if (TREE_CODE (type) == BOOLEAN_TYPE) return expr; switch (TREE_CODE (expr)) { case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: /* Also boolify the arguments of truth exprs. 
*/ TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1)); /* FALLTHRU */ case TRUTH_NOT_EXPR: TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0)); /* FALLTHRU */ case EQ_EXPR: case NE_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: /* These expressions always produce boolean results. */ TREE_TYPE (expr) = boolean_type_node; return expr; default: /* Other expressions that get here must have boolean values, but might need to be converted to the appropriate mode. */ return fold_convert (boolean_type_node, expr); } } /* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;' into if (p) if (p) t1 = a; a; else or else t1 = b; b; t1; The second form is used when *EXPR_P is of type void. TARGET is the tree for T1 above. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_cond_expr (tree *expr_p, tree *pre_p, fallback_t fallback) { tree expr = *expr_p; tree tmp, tmp2, type; enum gimplify_status ret; type = TREE_TYPE (expr); /* If this COND_EXPR has a value, copy the values into a temporary within the arms. */ if (! VOID_TYPE_P (type)) { tree result; if ((fallback & fb_lvalue) == 0) { result = tmp2 = tmp = create_tmp_var (TREE_TYPE (expr), "iftmp"); ret = GS_ALL_DONE; } else { tree type = build_pointer_type (TREE_TYPE (expr)); if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node) TREE_OPERAND (expr, 1) = build_fold_addr_expr (TREE_OPERAND (expr, 1)); if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node) TREE_OPERAND (expr, 2) = build_fold_addr_expr (TREE_OPERAND (expr, 2)); tmp2 = tmp = create_tmp_var (type, "iftmp"); expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (expr, 0), TREE_OPERAND (expr, 1), TREE_OPERAND (expr, 2)); result = build_fold_indirect_ref (tmp); ret = GS_ALL_DONE; } /* Build the then clause, 't1 = a;'. But don't build an assignment if this branch is void; in C++ it can be, if it's a throw. 
*/
      if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
	TREE_OPERAND (expr, 1) = build2 (MODIFY_EXPR, void_type_node, tmp,
					 TREE_OPERAND (expr, 1));

      /* Build the else clause, 't1 = b;'.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
	TREE_OPERAND (expr, 2) = build2 (MODIFY_EXPR, void_type_node, tmp2,
					 TREE_OPERAND (expr, 2));

      /* The whole COND_EXPR is now a statement; its value lives in the
	 temporary.  */
      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_and_add (expr, pre_p);

      *expr_p = result;
      return ret;
    }

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
	{
	  *expr_p = expr;

	  /* We can't rely on gimplify_expr to re-gimplify the expanded
	     form properly, as cleanups might cause the target labels to be
	     wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to
	     set up a conditional context.  */
	  gimple_push_condition ();
	  gimplify_stmt (expr_p);
	  gimple_pop_condition (pre_p);

	  return GS_ALL_DONE;
	}
    }

  /* Now do the normal gimplification.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
		       is_gimple_condexpr, fb_rvalue);

  gimple_push_condition ();

  gimplify_to_stmt_list (&TREE_OPERAND (expr, 1));
  gimplify_to_stmt_list (&TREE_OPERAND (expr, 2));
  recalculate_side_effects (expr);

  gimple_pop_condition (pre_p);

  if (ret == GS_ERROR)
    ;
  else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1)))
    ret = GS_ALL_DONE;
  else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 2)))
    /* Rewrite "if (a); else b" to "if (!a) b"  */
    {
      TREE_OPERAND (expr, 0) = invert_truthvalue (TREE_OPERAND (expr, 0));
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
			   is_gimple_condexpr, fb_rvalue);

      tmp = TREE_OPERAND (expr, 1);
      TREE_OPERAND (expr, 1) = TREE_OPERAND (expr, 2);
      TREE_OPERAND (expr, 2) = tmp;
    }
  else
    /* Both arms are empty; replace the COND_EXPR with its predicate.  */
    expr = TREE_OPERAND (expr, 0);

  *expr_p = expr;
  return ret;
}

/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memcpy.  */

static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value)
{
  tree args, t, to, to_ptr, from;

  to = TREE_OPERAND (*expr_p, 0);
  from = TREE_OPERAND (*expr_p, 1);

  /* The argument list is built in reverse (tree_cons prepends), giving
     the final order (&to, &from, size).  */
  args = tree_cons (NULL, size, NULL);

  t = build_fold_addr_expr (from);
  args = tree_cons (NULL, t, args);

  to_ptr = build_fold_addr_expr (to);
  args = tree_cons (NULL, to_ptr, args);
  t = implicit_built_in_decls[BUILT_IN_MEMCPY];
  t = build_function_call_expr (t, args);

  if (want_value)
    {
      /* Re-derive the assigned value by dereferencing memcpy's return
	 value (which is the destination pointer).  */
      t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t);
      t = build1 (INDIRECT_REF, TREE_TYPE (to), t);
    }

  *expr_p = t;
  return GS_OK;
}

/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memset.  In this case we know that the RHS is
   a CONSTRUCTOR with an empty element list.
*/

static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value)
{
  tree args, t, to, to_ptr;

  to = TREE_OPERAND (*expr_p, 0);

  /* Built in reverse (tree_cons prepends): final order is (&to, 0, size).  */
  args = tree_cons (NULL, size, NULL);

  args = tree_cons (NULL, integer_zero_node, args);

  to_ptr = build_fold_addr_expr (to);
  args = tree_cons (NULL, to_ptr, args);
  t = implicit_built_in_decls[BUILT_IN_MEMSET];
  t = build_function_call_expr (t, args);

  if (want_value)
    {
      /* Re-derive the assigned value by dereferencing memset's return
	 value (the destination pointer).  */
      t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t);
      t = build1 (INDIRECT_REF, TREE_TYPE (to), t);
    }

  *expr_p = t;
  return GS_OK;
}

/* A subroutine of gimplify_init_ctor_preeval.  Called via walk_tree,
   determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
   assignment.  Returns non-null if we detect a potential overlap.  */

struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object.  */
  int lhs_alias_set;
};

static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *data
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree t = *tp;

  /* If we find the base object, obviously we have overlap.  */
  if (data->lhs_base_decl == t)
    return t;

  /* If the constructor component is indirect, determine if we have a
     potential overlap with the lhs.  The only bits of information we
     have to go on at this point are addressability and alias sets.  */
  if (TREE_CODE (t) == INDIRECT_REF
      && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
      && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t)))
    return t;

  /* If the constructor component is a call, determine if it can hide a
     potential overlap with the lhs through an INDIRECT_REF like above.
     */
  if (TREE_CODE (t) == CALL_EXPR)
    {
      tree type, fntype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));

      /* Any pointer parameter whose pointed-to type conflicts with the
	 lhs alias set could be used by the callee to reach the lhs.  */
      for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type))
	if (POINTER_TYPE_P (TREE_VALUE (type))
	    && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
	    && alias_sets_conflict_p (data->lhs_alias_set,
				      get_alias_set
				        (TREE_TYPE (TREE_VALUE (type)))))
	  return t;
    }

  /* Types and decls cannot themselves reference the lhs; prune the walk.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL;
}

/* A subroutine of gimplify_init_constructor.  Pre-evaluate *EXPR_P,
   force values that overlap with the lhs (as described by *DATA)
   into temporaries.  */

static void
gimplify_init_ctor_preeval (tree *expr_p, tree *pre_p, tree *post_p,
			    struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;

  /* If the value is invariant, then there's nothing to pre-evaluate.
     But ensure it doesn't have any side-effects since a SAVE_EXPR is
     invariant but has side effects and might contain a reference to
     the object we're initializing.  */
  if (TREE_INVARIANT (*expr_p) && !TREE_SIDE_EFFECTS (*expr_p))
    return;

  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;

  /* Recurse for nested constructors.  */
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p);

      for (ix = 0; VEC_iterate (constructor_elt, v, ix, ce); ix++)
	gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);
      return;
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* Gimplify the constructor element to something appropriate for the rhs
     of a MODIFY_EXPR.  Given that we know the lhs is an aggregate, we know
     the gimplifier will consider this a store to memory.  Doing this
     gimplification now means that we won't have to deal with complicated
     language-specific trees, nor trees like SAVE_EXPR that can induce
     exponential search behavior.
*/
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      /* Gimplification failed; NULL elements are skipped by the caller.  */
      *expr_p = NULL;
      return;
    }

  /* If we gimplified to a bare decl, we can be sure that it doesn't overlap
     with the lhs, since "a = { .x=a }" doesn't make sense.  This will
     always be true for all scalars, since is_gimple_mem_rhs insists on a
     temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;

  /* If this is of variable size, we have no choice but to assume it doesn't
     overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;

  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;

  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}

/* A subroutine of gimplify_init_ctor_eval.  Create a loop for
   a RANGE_EXPR in a CONSTRUCTOR for an array.

      var = lower;
    loop_entry:
      object[var] = value;
      if (var == upper)
	goto loop_exit;
      var = var + 1;
      goto loop_entry;
    loop_exit:

   We increment var _after_ the loop exit check because we might otherwise
   fail if upper == TYPE_MAX_VALUE (type for upper).

   Note that we never have to deal with SAVE_EXPRs here, because this has
   already been taken care of for us, in gimplify_init_ctor_preeval().  */

static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *,
				     tree *, bool);

static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
			       tree value, tree array_elt_type,
			       tree *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label;
  tree var, var_type, cref;

  loop_entry_label = create_artificial_label ();
  loop_exit_label = create_artificial_label ();

  /* Create and initialize the index variable.  The index shares the type
     of the UPPER bound.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type, NULL);
  append_to_statement_list (build2 (MODIFY_EXPR, var_type, var, lower), pre_p);

  /* Add the loop entry label.  */
  append_to_statement_list (build1 (LABEL_EXPR,
				    void_type_node,
				    loop_entry_label),
			    pre_p);

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */

  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    append_to_statement_list (build2 (MODIFY_EXPR, TREE_TYPE (cref),
				      cref, value),
			      pre_p);

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_and_add (build3 (COND_EXPR, void_type_node,
			    build2 (EQ_EXPR, boolean_type_node,
				    var, upper),
			    build1 (GOTO_EXPR,
				    void_type_node,
				    loop_exit_label),
			    NULL_TREE),
		    pre_p);

  /* Otherwise, increment the index var...  */
  append_to_statement_list (build2 (MODIFY_EXPR, var_type, var,
				    build2 (PLUS_EXPR, var_type, var,
					    fold_convert (var_type,
							  integer_one_node))),
			    pre_p);

  /* ...and jump back to the loop entry.  */
  append_to_statement_list (build1 (GOTO_EXPR,
				    void_type_node,
				    loop_entry_label),
			    pre_p);

  /* Add the loop exit label.  */
  append_to_statement_list (build1 (LABEL_EXPR,
				    void_type_node,
				    loop_exit_label),
			    pre_p);
}

/* Return true if FDECL is accessing a field that is zero sized.  */

static bool
zero_sized_field_decl (tree fdecl)
{
  if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
      && integer_zerop (DECL_SIZE (fdecl)))
    return true;
  return false;
}

/* Return true if TYPE is zero sized.  */

static bool
zero_sized_type (tree type)
{
  if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type)
      && integer_zerop (TYPE_SIZE (type)))
    return true;
  return false;
}

/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.  OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.
CLEARED is true if the entire LHS object has been zeroed first.  */

static void
gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
			 tree *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  /* For arrays, cache the (main variant of the) element type; non-NULL
     array_elt_type below distinguishes array from record handling.  */
  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref, init;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;

      /* A zero element is already covered by the block clear.  */
      if (cleared && initializer_zerop (value))
	continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);

	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.  */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}

      if (array_elt_type)
	{
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}

      /* Recurse into nested constructors (but not vector ctors, which are
	 kept whole); otherwise emit a plain element initialization.  */
      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	}
    }
}

/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have explicit
   initializers, so if not all elements are initialized we keep the
   original MODIFY_EXPR, we just remove all of the constructor elements.  */

static enum gimplify_status
gimplify_init_constructor (tree *expr_p, tree *pre_p,
			   tree *post_p, bool want_value)
{
  tree object;
  tree ctor = TREE_OPERAND (*expr_p, 1);
  tree type = TREE_TYPE (ctor);
  enum gimplify_status ret;
  VEC(constructor_elt,gc) *elts;

  if (TREE_CODE (ctor) != CONSTRUCTOR)
    return GS_UNHANDLED;

  /* Gimplify the LHS first; OBJECT is needed by all cases below.  */
  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
		       is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;
  object = TREE_OPERAND (*expr_p, 0);

  elts = CONSTRUCTOR_ELTS (ctor);

  ret = GS_ALL_DONE;
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_type_elements, num_ctor_elements;
	HOST_WIDE_INT num_nonzero_elements;
	bool cleared, valid_const_initializer;

	/* Aggregate types must lower constructors to initialization of
	   individual elements.
The exception is that a CONSTRUCTOR node with no elements
	   indicates zero-initialization of the whole.  */
	if (VEC_empty (constructor_elt, elts))
	  break;

	/* Fetch information about the constructor to direct later
	   processing.  We might want to make static versions of it in
	   various cases, and can only do so if it known to be a valid
	   constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_ctor_elements, &cleared);

	/* If a const aggregate variable is being initialized, then it
	   should never be a lose to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    /* APPLE LOCAL begin CW asm blocks */
	    && TREE_CODE (object) == VAR_DECL
	    && !DECL_IASM_DONT_PROMOTE_TO_STATIC (object))
	    /* APPLE LOCAL end CW asm blocks */
	  {
	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks at FE private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which aren't
	       "local" in the object file sense.  So in order to get a unique
	       TU-local symbol, we must invoke the lhd version now.  */
	    lhd_set_decl_assembler_name (object);

	    /* The assignment is gone; the static's DECL_INITIAL does all
	       the work.  */
	    *expr_p = NULL_TREE;
	    break;
	  }

	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.  */
	/* TODO.  There's code in cp/typeck.c to do this.  */

	num_type_elements = count_type_elements (type, true);

	/* If count_type_elements could not determine number of type
	   elements for a constant-sized object, assume clearing is needed.
	   Don't do this for variable-sized objects, as store_constructor
	   will ignore the clearing of variable-sized objects.  */
	if (num_type_elements < 0 && int_size_in_bytes (type) >= 0)
	  cleared = true;
	/* If there are "lots" of zeros, then block clear the object first.  */
	else if (num_type_elements - num_nonzero_elements > CLEAR_RATIO
		 && num_nonzero_elements < num_type_elements/4)
	  cleared = true;
	/* ??? This bit ought not be needed.  For any element not present
	   in the initializer, we should simply set them to zero.  Except
	   we'd need to *find* the elements that are not present, and that
	   requires trickery to avoid quadratic compile-time behavior in
	   large cases or excessive memory use in small cases.  */
	else if (num_ctor_elements < num_type_elements)
	  cleared = true;

	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  */
	if (valid_const_initializer && !cleared)
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;

	    /* ??? We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }

	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.
*/
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);

	    if (size > 0 && !can_move_by_pieces (size, align))
	      {
		/* Drop the initializer into a static read-only temporary;
		   the assignment then becomes a block copy from it.  */
		tree new = create_tmp_var_raw (type, "C");

		gimple_add_tmp_var (new);
		TREE_STATIC (new) = 1;
		TREE_READONLY (new) = 1;
		/* LLVM LOCAL begin */
		/* On Darwin, we can't emit temporaries like this with private
		 * linkage, because it breaks 'atomization' of stuff in the
		 * object file by the linker.  We need to emit this as a l label
		 * without .globl. */
#ifndef CONFIG_DARWIN_H
#ifdef ENABLE_LLVM
		DECL_LLVM_PRIVATE (new) = 1;
#endif
#endif
		/* LLVM LOCAL end */
		DECL_INITIAL (new) = ctor;
		if (align > DECL_ALIGN (new))
		  {
		    DECL_ALIGN (new) = align;
		    DECL_USER_ALIGN (new) = 1;
		  }
		walk_tree (&DECL_INITIAL (new), force_labels_r, NULL, NULL);

		TREE_OPERAND (*expr_p, 1) = new;

		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }

	/* If there are nonzero elements, pre-evaluate to capture elements
	   overlapping with the lhs into temporaries.  We must do this before
	   clearing to fetch the values before they are zeroed-out.  */
	if (num_nonzero_elements > 0)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);

	    gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }

	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p);
	    append_to_statement_list (*expr_p, pre_p);
	  }

	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, add assignments to the individual
	   scalar fields of the object.  */
	if (!cleared || num_nonzero_elements > 0)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);

	*expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
	tree r, i;

	/* Extract the real and imaginary parts out of the ctor.  */
	gcc_assert (VEC_length (constructor_elt, elts) == 2);
	r = VEC_index (constructor_elt, elts, 0)->value;
	i = VEC_index (constructor_elt, elts, 1)->value;
	if (r == NULL || i == NULL)
	  {
	    /* A missing part defaults to zero.  */
	    tree zero = fold_convert (TREE_TYPE (type), integer_zero_node);
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }

	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
				 pre_p, post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;

	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;

	    /* Even when ctor is constant, it might contain non-*_CST
	       elements (e.g. { 1.0/0.0 - 1.0/0.0, 0.0 }) and those don't
	       belong into VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}

	    if (constant_p)
	      {
		TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
		break;
	      }

	    /* Don't reduce a TREE_CONSTANT vector ctor even if we can't
	       make a VECTOR_CST.  It won't do anything for us, and it'll
	       prevent us from representing it as a single constant.  */
	    break;
	  }

	/* Vector types use CONSTRUCTOR all the way through gimple
	   compilation as a general initializer.
*/
	for (ix = 0; VEC_iterate (constructor_elt, elts, ix, ce); ix++)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p,
				  is_gimple_val, fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	  }
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  else if (want_value)
    {
      /* Emit the assignment as a statement; the LHS object itself is the
	 resulting value.  */
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = object;
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}

/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */

static tree
fold_indirect_ref_rhs (tree t)
{
  tree type = TREE_TYPE (TREE_TYPE (t));
  tree sub = t;
  tree subtype;

  /* Strip useless type conversions before inspecting SUB.  */
  STRIP_USELESS_TYPE_CONVERSION (sub);
  subtype = TREE_TYPE (sub);
  if (!POINTER_TYPE_P (subtype))
    return NULL_TREE;

  if (TREE_CODE (sub) == ADDR_EXPR)
    {
      tree op = TREE_OPERAND (sub, 0);
      tree optype = TREE_TYPE (op);
      /* *&p => p */
      if (lang_hooks.types_compatible_p (type, optype))
	return op;
      /* *(foo *)&fooarray => fooarray[0] */
      else if (TREE_CODE (optype) == ARRAY_TYPE
	       && lang_hooks.types_compatible_p (type, TREE_TYPE (optype)))
	{
	  tree type_domain = TYPE_DOMAIN (optype);
	  tree min_val = size_zero_node;
	  if (type_domain && TYPE_MIN_VALUE (type_domain))
	    min_val = TYPE_MIN_VALUE (type_domain);
	  return build4 (ARRAY_REF, type, op, min_val, NULL_TREE, NULL_TREE);
	}
    }

  /* *(foo *)fooarrptr => (*fooarrptr)[0] */
  if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
      && lang_hooks.types_compatible_p (type, TREE_TYPE (TREE_TYPE (subtype))))
    {
      tree type_domain;
      tree min_val = size_zero_node;
      tree osub = sub;
      sub = fold_indirect_ref_rhs (sub);
      if (! sub)
	sub = build1 (INDIRECT_REF, TREE_TYPE (subtype), osub);
      type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
      if (type_domain && TYPE_MIN_VALUE (type_domain))
	min_val = TYPE_MIN_VALUE (type_domain);
      return build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE);
    }

  return NULL_TREE;
}

/* Subroutine of gimplify_modify_expr to do simplifications of MODIFY_EXPRs
   based on the code of the RHS.  We loop for as long as something changes.  */

static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p, tree *pre_p,
			  tree *post_p, bool want_value)
{
  enum gimplify_status ret = GS_OK;

  while (ret != GS_UNHANDLED)
    switch (TREE_CODE (*from_p))
      {
      case INDIRECT_REF:
	{
	  /* If we have code like

		*(const A*)(A*)&x

	     where the type of "x" is a (possibly cv-qualified variant
	     of "A"), treat the entire expression as identical to "x".
	     This kind of code arises in C++ when an object is bound
	     to a const reference, and if "x" is a TARGET_EXPR we want
	     to take advantage of the optimization below.  */
	  tree t = fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
	  if (t)
	    {
	      *from_p = t;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	  break;
	}

      case TARGET_EXPR:
	{
	  /* If we are initializing something from a TARGET_EXPR, strip the
	     TARGET_EXPR and initialize it directly, if possible.  This can't
	     be done if the initializer is void, since that implies that the
	     temporary is set in some non-trivial way.

	     ??? What about code that pulls out the temp and uses it
	     elsewhere? I think that such code never uses the TARGET_EXPR as
	     an initializer.  If I'm wrong, we'll die because the temp won't
	     have any RTL.  In that case, I guess we'll need to replace
	     references somehow.  */
	  tree init = TARGET_EXPR_INITIAL (*from_p);

	  if (!VOID_TYPE_P (TREE_TYPE (init)))
	    {
	      *from_p = init;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	}
	break;

      case COMPOUND_EXPR:
	/* Remove any COMPOUND_EXPR in the RHS so the following cases will be
	   caught.
*/
	gimplify_compound_expr (from_p, pre_p, true);
	ret = GS_OK;
	break;

      case CONSTRUCTOR:
	/* If we're initializing from a CONSTRUCTOR, break this into
	   individual MODIFY_EXPRs.  */
	return gimplify_init_constructor (expr_p, pre_p, post_p, want_value);

      case COND_EXPR:
	/* If we're assigning to a non-register type, push the assignment
	   down into the branches.  This is mandatory for ADDRESSABLE types,
	   since we cannot generate temporaries for such, but it saves a
	   copy in other cases as well.  */
	if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
	  {
	    /* This code should mirror the code in gimplify_cond_expr.  */
	    enum tree_code code = TREE_CODE (*expr_p);
	    tree cond = *from_p;
	    tree result = *to_p;

	    ret = gimplify_expr (&result, pre_p, post_p,
				 is_gimple_min_lval, fb_lvalue);
	    if (ret != GS_ERROR)
	      ret = GS_OK;

	    /* Rewrite each non-void arm into an assignment to RESULT.  */
	    if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
	      TREE_OPERAND (cond, 1)
		= build2 (code, void_type_node, result,
			  TREE_OPERAND (cond, 1));

	    if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
	      TREE_OPERAND (cond, 2)
		= build2 (code, void_type_node, unshare_expr (result),
			  TREE_OPERAND (cond, 2));

	    TREE_TYPE (cond) = void_type_node;
	    recalculate_side_effects (cond);

	    if (want_value)
	      {
		gimplify_and_add (cond, pre_p);
		*expr_p = unshare_expr (result);
	      }
	    else
	      *expr_p = cond;
	    return ret;
	  }
	else
	  ret = GS_UNHANDLED;
	break;

      case CALL_EXPR:
	/* For calls that return in memory, give *to_p as the CALL_EXPR's
	   return slot so that we don't generate a temporary.  */
	if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
	    && aggregate_value_p (*from_p, *from_p))
	  {
	    bool use_target;

	    if (!(rhs_predicate_for (*to_p))(*from_p))
	      /* If we need a temporary, *to_p isn't accurate.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == RESULT_DECL
		     && DECL_NAME (*to_p) == NULL_TREE
		     && needs_to_live_in_memory (*to_p))
	      /* It's OK to use the return slot directly unless it's an
		 NRV.  */
	      use_target = true;
	    else if (is_gimple_reg_type (TREE_TYPE (*to_p))
		     || (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
	      /* Don't force regs into memory.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == VAR_DECL
		     && DECL_GIMPLE_FORMAL_TEMP_P (*to_p))
	      /* Don't use the original target if it's a formal temp; we
		 don't want to take their addresses.  */
	      use_target = false;
	    else if (TREE_CODE (*expr_p) == INIT_EXPR)
	      /* It's OK to use the target directly if it's being
		 initialized.  */
	      use_target = true;
	    else if (!is_gimple_non_addressable (*to_p))
	      /* Don't use the original target if it's already addressable;
		 if its address escapes, and the called function uses the
		 NRV optimization, a conforming program could see *to_p
		 change before the called function returns; see c++/19317.
		 When optimizing, the return_slot pass marks more functions
		 as safe after we have escape info.  */
	      use_target = false;
	    else
	      use_target = true;

	    if (use_target)
	      {
		CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
		lang_hooks.mark_addressable (*to_p);
	      }
	  }

	ret = GS_UNHANDLED;
	break;

	/* If we're initializing from a container, push the initialization
	   inside it.  */
      case CLEANUP_POINT_EXPR:
      case BIND_EXPR:
      case STATEMENT_LIST:
	{
	  tree wrap = *from_p;
	  tree t;

	  ret = gimplify_expr (to_p, pre_p, post_p,
			       is_gimple_min_lval, fb_lvalue);
	  if (ret != GS_ERROR)
	    ret = GS_OK;

	  t = voidify_wrapper_expr (wrap, *expr_p);
	  gcc_assert (t == *expr_p);

	  if (want_value)
	    {
	      gimplify_and_add (wrap, pre_p);
	      *expr_p = unshare_expr (*to_p);
	    }
	  else
	    *expr_p = wrap;
	  return GS_OK;
	}

      default:
	ret = GS_UNHANDLED;
	break;
      }

  return ret;
}

/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
   DECL_COMPLEX_GIMPLE_REG_P set.  */

static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, tree *pre_p, bool want_value)
{
  enum tree_code code, ocode;
  tree lhs, rhs, new_rhs, other, realpart, imagpart;

  lhs = TREE_OPERAND (*expr_p, 0);
  rhs = TREE_OPERAND (*expr_p, 1);
  code = TREE_CODE (lhs);
  lhs = TREE_OPERAND (lhs, 0);

  /* OCODE selects the part NOT being stored, which we must read back.  */
  ocode = code == REALPART_EXPR ?
IMAGPART_EXPR : REALPART_EXPR;
  other = build1 (ocode, TREE_TYPE (rhs), lhs);
  other = get_formal_tmp_var (other, pre_p);

  realpart = code == REALPART_EXPR ? rhs : other;
  imagpart = code == REALPART_EXPR ? other : rhs;

  /* Rebuild the full complex value from the stored part plus the preserved
     other part.  */
  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);

  TREE_OPERAND (*expr_p, 0) = lhs;
  TREE_OPERAND (*expr_p, 1) = new_rhs;

  if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = rhs;
    }

  return GS_ALL_DONE;
}

/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.

      modify_expr
	      : varname '=' rhs
	      | '*' ID '=' rhs

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.

    WANT_VALUE is nonzero iff we want to use the value of this expression
	in another expression.  */

static enum gimplify_status
gimplify_modify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;

  /* Only assignments and initializations reach here.  */
  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
	      || TREE_CODE (*expr_p) == INIT_EXPR);

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  Do this after
     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
     types properly.  */
  if (zero_sized_type (TREE_TYPE (*from_p)))
    {
      gimplify_stmt (from_p);
      gimplify_stmt (to_p);
      append_to_statement_list (*from_p, pre_p);
      append_to_statement_list (*to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must here.  */
  maybe_with_size_expr (from_p);

  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  ret = gimplify_expr (from_p, pre_p, post_p,
		       rhs_predicate_for (*to_p), fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Now see if the above changed *from_p to something we handle specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
	return gimplify_modify_expr_to_memset (expr_p, size, want_value);
      if (is_gimple_addressable (from))
	{
	  *from_p = from;
	  return gimplify_modify_expr_to_memcpy (expr_p, size, want_value);
	}
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.
*/ if ((TREE_CODE (*to_p) == REALPART_EXPR || TREE_CODE (*to_p) == IMAGPART_EXPR) && is_gimple_reg (TREE_OPERAND (*to_p, 0))) return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value); if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p)) { /* If we've somehow already got an SSA_NAME on the LHS, then we're probably modified it twice. Not good. */ gcc_assert (TREE_CODE (*to_p) != SSA_NAME); *to_p = make_ssa_name (*to_p, *expr_p); } if (want_value) { append_to_statement_list (*expr_p, pre_p); *expr_p = *to_p; return GS_OK; } return GS_ALL_DONE; } /* Gimplify a comparison between two variable-sized objects. Do this with a call to BUILT_IN_MEMCMP. */ static enum gimplify_status gimplify_variable_sized_compare (tree *expr_p) { tree op0 = TREE_OPERAND (*expr_p, 0); tree op1 = TREE_OPERAND (*expr_p, 1); tree args, t, dest; t = TYPE_SIZE_UNIT (TREE_TYPE (op0)); t = unshare_expr (t); t = SUBSTITUTE_PLACEHOLDER_IN_EXPR (t, op0); args = tree_cons (NULL, t, NULL); t = build_fold_addr_expr (op1); args = tree_cons (NULL, t, args); dest = build_fold_addr_expr (op0); args = tree_cons (NULL, dest, args); t = implicit_built_in_decls[BUILT_IN_MEMCMP]; t = build_function_call_expr (t, args); *expr_p = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t, integer_zero_node); return GS_OK; } /* Gimplify a comparison between two aggregate objects of integral scalar mode as a comparison between the bitwise equivalent scalar values. */ static enum gimplify_status gimplify_scalar_mode_aggregate_compare (tree *expr_p) { tree op0 = TREE_OPERAND (*expr_p, 0); tree op1 = TREE_OPERAND (*expr_p, 1); tree type = TREE_TYPE (op0); tree scalar_type = lang_hooks.types.type_for_mode (TYPE_MODE (type), 1); op0 = fold_build1 (VIEW_CONVERT_EXPR, scalar_type, op0); op1 = fold_build1 (VIEW_CONVERT_EXPR, scalar_type, op1); *expr_p = fold_build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), op0, op1); return GS_OK; } /* Gimplify TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR expressions. 
EXPR_P points to the expression to gimplify. Expressions of the form 'a && b' are gimplified to: a && b ? true : false gimplify_cond_expr will do the rest. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_boolean_expr (tree *expr_p) { /* Preserve the original type of the expression. */ tree type = TREE_TYPE (*expr_p); *expr_p = build3 (COND_EXPR, type, *expr_p, fold_convert (type, boolean_true_node), fold_convert (type, boolean_false_node)); return GS_OK; } /* Gimplifies an expression sequence. This function gimplifies each expression and re-writes the original expression with the last expression of the sequence in GIMPLE form. PRE_P points to the list where the side effects for all the expressions in the sequence will be emitted. WANT_VALUE is true when the result of the last COMPOUND_EXPR is used. */ /* ??? Should rearrange to share the pre-queue with all the indirect invocations of gimplify_expr. Would probably save on creations of statement_list nodes. */ static enum gimplify_status gimplify_compound_expr (tree *expr_p, tree *pre_p, bool want_value) { tree t = *expr_p; do { tree *sub_p = &TREE_OPERAND (t, 0); if (TREE_CODE (*sub_p) == COMPOUND_EXPR) gimplify_compound_expr (sub_p, pre_p, false); else gimplify_stmt (sub_p); append_to_statement_list (*sub_p, pre_p); t = TREE_OPERAND (t, 1); } while (TREE_CODE (t) == COMPOUND_EXPR); *expr_p = t; if (want_value) return GS_OK; else { gimplify_stmt (expr_p); return GS_ALL_DONE; } } /* Gimplifies a statement list. These may be created either by an enlightened front-end, or by shortcut_cond_expr. 
*/ static enum gimplify_status gimplify_statement_list (tree *expr_p, tree *pre_p) { tree temp = voidify_wrapper_expr (*expr_p, NULL); tree_stmt_iterator i = tsi_start (*expr_p); while (!tsi_end_p (i)) { tree t; gimplify_stmt (tsi_stmt_ptr (i)); t = tsi_stmt (i); if (t == NULL) tsi_delink (&i); else if (TREE_CODE (t) == STATEMENT_LIST) { tsi_link_before (&i, t, TSI_SAME_STMT); tsi_delink (&i); } else tsi_next (&i); } if (temp) { append_to_statement_list (*expr_p, pre_p); *expr_p = temp; return GS_OK; } return GS_ALL_DONE; } /* Gimplify a SAVE_EXPR node. EXPR_P points to the expression to gimplify. After gimplification, EXPR_P will point to a new temporary that holds the original value of the SAVE_EXPR node. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_save_expr (tree *expr_p, tree *pre_p, tree *post_p) { enum gimplify_status ret = GS_ALL_DONE; tree val; gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR); val = TREE_OPERAND (*expr_p, 0); /* If the SAVE_EXPR has not been resolved, then evaluate it once. */ if (!SAVE_EXPR_RESOLVED_P (*expr_p)) { /* The operand may be a void-valued expression such as SAVE_EXPRs generated by the Java frontend for class initialization. It is being executed only for its side-effects. */ if (TREE_TYPE (val) == void_type_node) { ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_stmt, fb_none); append_to_statement_list (TREE_OPERAND (*expr_p, 0), pre_p); val = NULL; } else val = get_initialized_tmp_var (val, pre_p, post_p); TREE_OPERAND (*expr_p, 0) = val; SAVE_EXPR_RESOLVED_P (*expr_p) = 1; } *expr_p = val; return ret; } /* Re-write the ADDR_EXPR node pointed to by EXPR_P unary_expr : ... | '&' varname ... PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. 
*/

static enum gimplify_status
gimplify_addr_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    case MISALIGNED_INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
	 While the front end folds away '&*ptr' into 'ptr', these
	 expressions may be generated internally by the compiler (e.g.,
	 builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
	 ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
	 cv-qualification conversions, so make sure we add them if
	 needed.  */
      {
	tree op00 = TREE_OPERAND (op0, 0);
	tree t_expr = TREE_TYPE (expr);
	tree t_op00 = TREE_TYPE (op00);

	if (!lang_hooks.types_compatible_p (t_expr, t_op00))
	  {
#ifdef ENABLE_CHECKING
	    tree t_op0 = TREE_TYPE (op0);
	    gcc_assert (POINTER_TYPE_P (t_expr)
			&& cpt_same_type (TREE_CODE (t_op0) == ARRAY_TYPE
					  ? TREE_TYPE (t_op0) : t_op0,
					  TREE_TYPE (t_expr))
			&& POINTER_TYPE_P (t_op00)
			&& cpt_same_type (t_op0, TREE_TYPE (t_op00)));
#endif
	    op00 = fold_convert (TREE_TYPE (expr), op00);
	  }
	*expr_p = op00;
	ret = GS_OK;
      }
      break;

    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
	 this ADDR_EXPR.

	 ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
	 all clear.  The impact of this transformation is even less clear.  */

      /* If the operand is a useless conversion, look through it.  Doing so
	 guarantees that the ADDR_EXPR and its operand will remain of the
	 same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
	op0 = TREE_OPERAND (op0, 0);

      *expr_p = fold_convert (TREE_TYPE (expr),
			      build_fold_addr_expr (TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;

    default:
      /* We use fb_either here because the C frontend sometimes takes
	 the address of a call that returns a struct; see
	 gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
	 the implied temporary explicit.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
			   is_gimple_addressable, fb_either);
      if (ret != GS_ERROR)
	{
	  op0 = TREE_OPERAND (expr, 0);

	  /* For various reasons, the gimplification of the expression
	     may have made a new INDIRECT_REF.  */
	  if (TREE_CODE (op0) == INDIRECT_REF)
	    goto do_indirect_ref;

	  /* Make sure TREE_INVARIANT, TREE_CONSTANT, and TREE_SIDE_EFFECTS
	     is set properly.  */
	  recompute_tree_invariant_for_addr_expr (expr);

	  /* Mark the RHS addressable.  */
	  lang_hooks.mark_addressable (TREE_OPERAND (expr, 0));
	}
      break;
    }

  return ret;
}

/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.  */

static enum gimplify_status
gimplify_asm_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  int noutputs = list_length (ASM_OUTPUTS (expr));
  const char **oconstraints
    = (const char **) alloca ((noutputs) * sizeof (const char *));
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;

  ret = GS_ALL_DONE;
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      size_t constraint_len;
      oconstraints[i] = constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
	continue;

      parse_output_constraint (&constraint, i, 0, 0,
			       &allows_mem, &allows_reg, &is_inout);

      if (!allows_reg && allows_mem)
	lang_hooks.mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
			    is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			    fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
	{
	  error ("invalid lvalue in asm output %d", i);
	  ret = tret;
	}

      if (is_inout)
	{
	  /* An input/output operand.  To give the optimizers more
	     flexibility, split it into separate input and output
	     operands.  */
	  tree input;
	  char buf[10];
	  /* Turn the in/out constraint into an output constraint.  */
	  char *p = xstrdup (constraint);
	  p[0] = '=';
	  TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

	  /* And add a matching input constraint.  */
	  if (allows_reg)
	    {
	      sprintf (buf, "%d", i);

	      /* If there are multiple alternatives in the constraint,
		 handle each of them individually.  Those that allow register
		 will be replaced with operand number, the others will stay
		 unchanged.  */
	      if (strchr (p, ',') != NULL)
		{
		  /* First pass: compute an upper bound on the length of
		     the rewritten constraint string.  */
		  size_t len = 0, buflen = strlen (buf);
		  char *beg, *end, *str, *dst;

		  for (beg = p + 1;;)
		    {
		      end = strchr (beg, ',');
		      if (end == NULL)
			end = strchr (beg, '\0');
		      if ((size_t) (end - beg) < buflen)
			len += buflen + 1;
		      else
			len += end - beg + 1;
		      if (*end)
			beg = end + 1;
		      else
			break;
		    }

		  /* Second pass: copy each alternative, substituting the
		     operand number for the ones that allow a register.
		     Note this temporarily writes '=' into P (beg[-1]) so
		     parse_output_constraint sees an output constraint.  */
		  str = (char *) alloca (len);
		  for (beg = p + 1, dst = str;;)
		    {
		      const char *tem;
		      bool mem_p, reg_p, inout_p;

		      end = strchr (beg, ',');
		      if (end)
			*end = '\0';
		      beg[-1] = '=';
		      tem = beg - 1;
		      parse_output_constraint (&tem, i, 0, 0,
					       &mem_p, &reg_p, &inout_p);
		      if (dst != str)
			*dst++ = ',';
		      if (reg_p)
			{
			  memcpy (dst, buf, buflen);
			  dst += buflen;
			}
		      else
			{
			  if (end)
			    len = end - beg;
			  else
			    len = strlen (beg);
			  memcpy (dst, beg, len);
			  dst += len;
			}
		      if (end)
			beg = end + 1;
		      else
			break;
		    }
		  *dst = '\0';
		  input = build_string (dst - str, str);
		}
	      else
		input = build_string (strlen (buf), buf);
	    }
	  else
	    /* Memory-only in/out: the matching input is the original
	       constraint minus the leading '+'.  */
	    input = build_string (constraint_len - 1, constraint + 1);
	  free (p);
	  input = build_tree_list (build_tree_list (NULL_TREE, input),
				   unshare_expr (TREE_VALUE (link)));
	  ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
	}
    }

  /* I is deliberately not reset here: input operands continue the
     numbering after the outputs, matching asm operand indices in the
     diagnostics below.  */
  for (link = ASM_INPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
			      oconstraints, &allows_mem, &allows_reg);

      /* If we can't make copies, we can only accept memory.  */
      if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link))))
	{
	  if (allows_mem)
	    allows_reg = 0;
	  else
	    {
	      error ("impossible constraint in %<asm%>");
	      error ("non-memory input %d must stay in memory", i);
	      return GS_ERROR;
	    }
	}

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_lvalue, fb_lvalue | fb_mayfail);
	  lang_hooks.mark_addressable (TREE_VALUE (link));
	  if (tret == GS_ERROR)
	    {
	      error ("memory input %d is not directly addressable", i);
	      ret = tret;
	    }
	}
      else
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_asm_val, fb_rvalue);
	  if (tret == GS_ERROR)
	    ret = tret;
	}
    }

  return ret;
}

/* Gimplify a CLEANUP_POINT_EXPR.  Currently this works by adding
   WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
   gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
   return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.  But any such nesting would be broken, anyway;
   we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
   and continues out of it.  We can do that at the RTL level, though, so
   having an optimizer to tighten up try/finally regions would be a Good
   Thing.  */

static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p)
{
  tree_stmt_iterator iter;
  tree body;

  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.
*/
  int old_conds = gimplify_ctxp->conditions;
  tree old_cleanups = gimplify_ctxp->conditional_cleanups;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL_TREE;

  body = TREE_OPERAND (*expr_p, 0);
  gimplify_to_stmt_list (&body);

  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;

  /* Convert each WITH_CLEANUP_EXPR marker found in the body into a
     TRY_FINALLY_EXPR (or TRY_CATCH_EXPR for EH-only cleanups) that
     protects all the statements that follow it.  */
  for (iter = tsi_start (body); !tsi_end_p (iter); )
    {
      tree *wce_p = tsi_stmt_ptr (iter);
      tree wce = *wce_p;

      if (TREE_CODE (wce) == WITH_CLEANUP_EXPR)
	{
	  if (tsi_one_before_end_p (iter))
	    {
	      /* A cleanup at the end of the list protects nothing; just
		 run it unconditionally.  */
	      tsi_link_before (&iter, TREE_OPERAND (wce, 0), TSI_SAME_STMT);
	      tsi_delink (&iter);
	      break;
	    }
	  else
	    {
	      tree sl, tfe;
	      enum tree_code code;

	      if (CLEANUP_EH_ONLY (wce))
		code = TRY_CATCH_EXPR;
	      else
		code = TRY_FINALLY_EXPR;

	      sl = tsi_split_statement_list_after (&iter);
	      tfe = build2 (code, void_type_node, sl, NULL_TREE);
	      append_to_statement_list (TREE_OPERAND (wce, 0),
					&TREE_OPERAND (tfe, 1));
	      *wce_p = tfe;
	      /* Re-scan the protected statements for nested cleanups.  */
	      iter = tsi_start (sl);
	    }
	}
      else
	tsi_next (&iter);
    }

  if (temp)
    {
      *expr_p = temp;
      append_to_statement_list (body, pre_p);
      return GS_OK;
    }
  else
    {
      *expr_p = body;
      return GS_ALL_DONE;
    }
}

/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  */

static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, tree *pre_p)
{
  tree wce;

  /* Errors can result in improperly nested cleanups.  Which results in
     confusion when trying to resolve the WITH_CLEANUP_EXPR.  */
  if (errorcount || sorrycount)
    return;

  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex.  We only
	 want to run the cleanup if we actually ran the initialization that
	 necessitates it, but we want to run it after the end of the
	 conditional context.  So we wrap the try/finally around the
	 condition and use a flag to determine whether or not to actually
	 run the destructor.  Thus

	   test ? f(A()) : 0

	 becomes (approximately)

	   flag = 0;
	   try {
	     if (test) { A::A(temp); flag = 1; val = f(temp); }
	     else { val = 0; }
	   } finally {
	     if (flag) A::~A(temp);
	   }
	   val
      */

      tree flag = create_tmp_var (boolean_type_node, "cleanup");
      tree ffalse = build2 (MODIFY_EXPR, void_type_node, flag,
			    boolean_false_node);
      tree ftrue = build2 (MODIFY_EXPR, void_type_node, flag,
			   boolean_true_node);
      cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      append_to_statement_list (ffalse, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (wce, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (ftrue, pre_p);

      /* Because of this manipulation, and the EH edges that jump
	 threading cannot redirect, the temporary (VAR) will appear
	 to be used uninitialized.  Don't warn.  */
      TREE_NO_WARNING (var) = 1;
    }
  else
    {
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      CLEANUP_EH_ONLY (wce) = eh_only;
      append_to_statement_list (wce, pre_p);
    }

  gimplify_stmt (&TREE_OPERAND (wce, 0));
}

/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR.  */

static enum gimplify_status
gimplify_target_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;

  if (init)
    {
      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
	 to the temps list.  */
      gimple_add_tmp_var (temp);

      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
	 expression is supposed to initialize the slot.  */
      if (VOID_TYPE_P (TREE_TYPE (init)))
	ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
	{
	  init = build2 (INIT_EXPR, void_type_node, temp, init);
	  ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt,
			       fb_none);
	}
      if (ret == GS_ERROR)
	{
	  /* PR c++/28266 Make sure this is expanded only once. */
	  TARGET_EXPR_INITIAL (targ) = NULL_TREE;
	  return GS_ERROR;
	}
      append_to_statement_list (init, pre_p);

      /* If needed, push the cleanup for the temp.  */
      if (TARGET_EXPR_CLEANUP (targ))
	{
	  gimplify_stmt (&TARGET_EXPR_CLEANUP (targ));
	  gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
			       CLEANUP_EH_ONLY (targ), pre_p);
	}

      /* Only expand this once.  */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before.  */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));

  *expr_p = temp;
  return GS_OK;
}

/* Gimplification of expression trees.  */

/* Gimplify an expression which appears at statement context; usually, this
   means replacing it with a suitably gimple STATEMENT_LIST.  */

void
gimplify_stmt (tree *stmt_p)
{
  gimplify_expr (stmt_p, NULL, NULL, is_gimple_stmt, fb_none);
}

/* Similarly, but force the result to be a STATEMENT_LIST.  */

void
gimplify_to_stmt_list (tree *stmt_p)
{
  gimplify_stmt (stmt_p);
  if (!*stmt_p)
    *stmt_p = alloc_stmt_list ();
  else if (TREE_CODE (*stmt_p) != STATEMENT_LIST)
    {
      tree t = *stmt_p;
      *stmt_p = alloc_stmt_list ();
      append_to_statement_list (t, stmt_p);
    }
}

/* Add FIRSTPRIVATE entries for DECL in the OpenMP the surrounding parallels
   to CTX.  If entries already exist, force them to be some flavor of private.
   If there is no enclosing parallel, do nothing.  */

void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  if (decl == NULL || !DECL_P (decl))
    return;

  do
    {
      n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
      if (n != NULL)
	{
	  /* Demote an existing SHARED entry to FIRSTPRIVATE, keeping only
	     the SEEN bit; any other existing class is left alone.  */
	  if (n->value & GOVD_SHARED)
	    n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN);
	  else
	    return;
	}
      else if (ctx->is_parallel)
	omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);

      ctx = ctx->outer_context;
    }
  while (ctx);
}

/* Similarly for each of the type sizes of TYPE.
*/

static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
  if (type == NULL || type == error_mark_node)
    return;
  type = TYPE_MAIN_VARIANT (type);

  /* Process each type at most once per context tree.  */
  if (pointer_set_insert (ctx->privatized_types, type))
    return;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
      omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
      omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
      break;

    case ARRAY_TYPE:
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree field;
	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  if (TREE_CODE (field) == FIELD_DECL)
	    {
	      omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
	      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
	    }
      }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      break;

    default:
      break;
    }

  omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
  omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));
  lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}

/* Add an entry for DECL in the OpenMP context CTX with FLAGS.  */

static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return;

  /* Never elide decls whose type has TREE_ADDRESSABLE set.  This means
     there are constructors involved somewhere.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (decl))
      || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
    flags |= GOVD_SEEN;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class.  */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE.  */
      nflags = n->value | flags;
      gcc_assert ((nflags & GOVD_DATA_SHARE_CLASS)
		  == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE));
      n->value = nflags;
      return;
    }

  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type.  */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context.  */
      if (!(flags & GOVD_LOCAL))
	{
	  nflags = flags & GOVD_PRIVATE ? GOVD_PRIVATE : GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}

      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE.  */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE.  The sharing would take place via the pointer variable
	 which we remapped above.  */
      if (flags & GOVD_SHARED)
	flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));

      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available.  This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified.  */
      else if (! (flags & GOVD_LOCAL))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if (lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      gcc_assert ((flags & GOVD_LOCAL) == 0);
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized.  */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (TREE_CODE (t) != INTEGER_CST)
	    omp_notice_variable (ctx, t, true);
	}
    }

  splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
}

/* Record the fact that DECL was used within the OpenMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors.  Return true if DECL is going to
   be remapped and thus DECL shouldn't be gimplified into its
   DECL_VALUE_EXPR (if any).  */

static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return false;

  /* Threadprivate variables are predetermined.  */
  if (is_global_var (decl))
    {
      if (DECL_THREAD_LOCAL_P (decl))
	return false;

      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  tree value = get_base_address (DECL_VALUE_EXPR (decl));

	  if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
	    return false;
	}
    }

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      enum omp_clause_default_kind default_kind, kind;

      if (!ctx->is_parallel)
	goto do_outer;

      /* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
	 remapped firstprivate instead of shared.  To some extent this is
	 addressed in omp_firstprivatize_type_sizes, but not
	 effectively.  */
      default_kind = ctx->default_kind;
      kind = lang_hooks.decls.omp_predetermined_sharing (decl);
      if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
	default_kind = kind;

      switch (default_kind)
	{
	case OMP_CLAUSE_DEFAULT_NONE:
	  error ("%qs not specified in enclosing parallel",
		 IDENTIFIER_POINTER (DECL_NAME (decl)));
	  error ("%Henclosing parallel", &ctx->location);
	  /* FALLTHRU */
	case OMP_CLAUSE_DEFAULT_SHARED:
	  flags |= GOVD_SHARED;
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  flags |= GOVD_PRIVATE;
	  break;
	default:
	  gcc_unreachable ();
	}

      omp_add_variable (ctx, decl, flags);

      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }

  shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);

  /* If nothing changed, there's nothing left to do.  */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;

 do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context.  */
  if (flags & GOVD_PRIVATE)
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}

/* Verify that DECL is private within CTX.  If there's specific information
   to the contrary in the innermost scope, generate an error.  */

static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      if (n->value & GOVD_SHARED)
	{
	  if (ctx == gimplify_omp_ctxp)
	    {
	      error ("iteration variable %qs should be private",
		     IDENTIFIER_POINTER (DECL_NAME (decl)));
	      n->value = GOVD_PRIVATE;
	      return true;
	    }
	  else
	    return false;
	}
      else if ((n->value & GOVD_EXPLICIT) != 0
	       && (ctx == gimplify_omp_ctxp
		   || (ctx->is_combined_parallel
		       && gimplify_omp_ctxp->outer_context == ctx)))
	{
	  if ((n->value & GOVD_FIRSTPRIVATE) != 0)
	    error ("iteration variable %qs should not be firstprivate",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	  else if ((n->value & GOVD_REDUCTION) != 0)
	    error ("iteration variable %qs should not be reduction",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	}
      return true;
    }

  if (ctx->is_parallel)
    return false;
  else if (ctx->outer_context)
    return omp_is_private (ctx->outer_context, decl);
  else
    return !is_global_var (decl);
}

/* Return true if DECL is private within a parallel region that binds to the
   current construct's context or in parallel region's REDUCTION clause.  */

static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  /* Walk outward until the innermost enclosing parallel context.  */
  do
    {
      ctx = ctx->outer_context;
      if (ctx == NULL)
	return !(is_global_var (decl)
		 /* References might be private, but might be shared too.  */
		 || lang_hooks.decls.omp_privatize_by_reference (decl));

      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      if (n != NULL)
	return (n->value & GOVD_SHARED) == 0;
    }
  while (!ctx->is_parallel);

  return false;
}

/* Scan the OpenMP clauses in *LIST_P, installing mappings into a new
   and previous omp contexts.
*/

static void
gimplify_scan_omp_clauses (tree *list_p, tree *pre_p, bool in_parallel,
			   bool in_combined_parallel)
{
  struct gimplify_omp_ctx *ctx, *outer_ctx;
  tree c;

  ctx = new_omp_context (in_parallel, in_combined_parallel);
  outer_ctx = ctx->outer_context;

  while ((c = *list_p) != NULL)
    {
      enum gimplify_status gs;
      bool remove = false;
      bool notice_outer = true;
      const char *check_non_private = NULL;
      unsigned int flags;
      tree decl;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  flags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  notice_outer = false;
	  goto do_add;
	case OMP_CLAUSE_SHARED:
	  flags = GOVD_SHARED | GOVD_EXPLICIT;
	  goto do_add;
	case OMP_CLAUSE_FIRSTPRIVATE:
	  flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
	  check_non_private = "firstprivate";
	  goto do_add;
	case OMP_CLAUSE_LASTPRIVATE:
	  flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "lastprivate";
	  goto do_add;
	case OMP_CLAUSE_REDUCTION:
	  flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "reduction";
	  goto do_add;

	do_add:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	  omp_add_variable (ctx, decl, flags);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      /* Gimplify the reduction init/merge sequences inside the
		 new context, each in its own gimplify context so their
		 temporaries stay local.  */
	      omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
				GOVD_LOCAL | GOVD_SEEN);
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_INIT (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_INIT (c));
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_MERGE (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_MERGE (c));
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  if (notice_outer)
	    goto do_notice;
	  break;

	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	do_notice:
	  if (outer_ctx)
	    omp_notice_variable (outer_ctx, decl, true);
	  if (check_non_private
	      && !in_parallel
	      && omp_check_private (ctx, decl))
	    {
	      error ("%s variable %qs is private in outer context",
		     check_non_private, IDENTIFIER_POINTER (DECL_NAME (decl)));
	      remove = true;
	    }
	  break;

	case OMP_CLAUSE_IF:
	  OMP_CLAUSE_OPERAND (c, 0)
	    = gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
	  /* Fall through.  */

	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NUM_THREADS:
	  gs = gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
			      is_gimple_val, fb_rvalue);
	  if (gs == GS_ERROR)
	    remove = true;
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  gimplify_omp_ctxp = ctx;
}

/* For all variables that were not actually used within the context,
   remove PRIVATE, SHARED, and FIRSTPRIVATE clauses.  */

static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  tree *list_p = (tree *) data;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;

  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_PRIVATE);
      private_debug = true;
    }
  else
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
						   !!(flags & GOVD_SHARED));
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_SHARED)
    {
      if (is_global_var (decl))
	{
	  /* A global is only worth an explicit SHARED clause if some
	     outer context gives it a private flavor; otherwise omit it.  */
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
	  while (ctx != NULL)
	    {
	      splay_tree_node on
		= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
				      | GOVD_PRIVATE | GOVD_REDUCTION)) != 0)
		break;
	      ctx = ctx->outer_context;
	    }
	  if (ctx == NULL)
	    return 0;
	}
      code = OMP_CLAUSE_SHARED;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    code = OMP_CLAUSE_FIRSTPRIVATE;
  else
    gcc_unreachable ();

  clause = build_omp_clause (code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = *list_p;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  *list_p = clause;

  return 0;
}

static void
gimplify_adjust_omp_clauses (tree *list_p)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree c, decl;

  while ((c = *list_p) != NULL)
    {
      splay_tree_node n;
      bool remove = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  remove = !(n->value & GOVD_SEEN);
	  if (! remove)
	    {
	      bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
	      if ((n->value & GOVD_DEBUG_PRIVATE)
		  || lang_hooks.decls.omp_private_debug_clause (decl, shared))
		{
		  gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
			      || ((n->value & GOVD_DATA_SHARE_CLASS)
				  == GOVD_PRIVATE));
		  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
		  OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
		}
	    }
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
	     accurately reflect the presence of a FIRSTPRIVATE clause.  */
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	    = (n->value & GOVD_FIRSTPRIVATE) != 0;
	  break;

	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_DEFAULT:
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  /* Add in any implicit data sharing.  */
  splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, list_p);

  gimplify_omp_ctxp = ctx->outer_context;
  delete_omp_context (ctx);
}

/* Gimplify the contents of an OMP_PARALLEL statement.
   This involves gimplification of the body, as well as scanning the body
   for used variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static enum gimplify_status
gimplify_omp_parallel (tree *expr_p, tree *pre_p)
{
  tree expr = *expr_p;

  /* Scan the clauses first; this pushes a new gimplify_omp_ctx that the
     body gimplification below will consult and populate.  */
  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p, true,
			     OMP_PARALLEL_COMBINED (expr));

  push_gimplify_context ();

  gimplify_stmt (&OMP_PARALLEL_BODY (expr));

  /* Gimplification may or may not have wrapped the body in a BIND_EXPR;
     pop the context into the bind if one exists.  */
  if (TREE_CODE (OMP_PARALLEL_BODY (expr)) == BIND_EXPR)
    pop_gimplify_context (OMP_PARALLEL_BODY (expr));
  else
    pop_gimplify_context (NULL_TREE);

  gimplify_adjust_omp_clauses (&OMP_PARALLEL_CLAUSES (expr));

  return GS_ALL_DONE;
}

/* Gimplify the gross structure of an OMP_FOR statement: the init, cond
   and incr expressions plus the loop body.  The iteration variable is
   forced private, and the incr expression is canonicalized into
   DECL = DECL +/- STEP form.  */

static enum gimplify_status
gimplify_omp_for (tree *expr_p, tree *pre_p)
{
  tree for_stmt, decl, t;
  /* NOTE(review): results are accumulated with |=, and the final return
     collapses anything other than GS_ALL_DONE to GS_ERROR; this relies on
     the numeric layout of enum gimplify_status — confirm in tree-gimple.h.  */
  enum gimplify_status ret = 0;

  for_stmt = *expr_p;

  gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p, false, false);

  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
  decl = TREE_OPERAND (t, 0);
  gcc_assert (DECL_P (decl));
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl)));

  /* Make sure the iteration variable is private.  */
  if (omp_is_private (gimplify_omp_ctxp, decl))
    omp_notice_variable (gimplify_omp_ctxp, decl, true);
  else
    omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);

  /* Gimplify the initial value into the pre-body so it is evaluated
     before the parallel loop proper.  */
  ret |= gimplify_expr (&TREE_OPERAND (t, 1), &OMP_FOR_PRE_BODY (for_stmt),
			NULL, is_gimple_val, fb_rvalue);

  t = OMP_FOR_COND (for_stmt);
  gcc_assert (COMPARISON_CLASS_P (t));
  gcc_assert (TREE_OPERAND (t, 0) == decl);

  ret |= gimplify_expr (&TREE_OPERAND (t, 1), &OMP_FOR_PRE_BODY (for_stmt),
			NULL, is_gimple_val, fb_rvalue);

  t = OMP_FOR_INCR (for_stmt);
  switch (TREE_CODE (t))
    {
    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      /* Rewrite ++/-- as DECL = DECL + (+1/-1).  Pre vs post makes no
	 difference here since the increment's value is unused.  */
      t = build_int_cst (TREE_TYPE (decl), 1);
      goto build_modify;
    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      t = build_int_cst (TREE_TYPE (decl), -1);
      goto build_modify;
    build_modify:
      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
      t = build2 (MODIFY_EXPR, void_type_node, decl, t);
      OMP_FOR_INCR (for_stmt) = t;
      break;

    case MODIFY_EXPR:
      gcc_assert (TREE_OPERAND (t, 0) == decl);
      t = TREE_OPERAND (t, 1);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  /* Canonicalize STEP + DECL into DECL + STEP.  */
	  if (TREE_OPERAND (t, 1) == decl)
	    {
	      TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
	      TREE_OPERAND (t, 0) = decl;
	      break;
	    }
	  /* Fall through: DECL must already be operand 0, which the
	     MINUS_EXPR assert below also checks.  */
	case MINUS_EXPR:
	  gcc_assert (TREE_OPERAND (t, 0) == decl);
	  break;
	default:
	  gcc_unreachable ();
	}

      ret |= gimplify_expr (&TREE_OPERAND (t, 1), &OMP_FOR_PRE_BODY (for_stmt),
			    NULL, is_gimple_val, fb_rvalue);
      break;

    default:
      gcc_unreachable ();
    }

  gimplify_to_stmt_list (&OMP_FOR_BODY (for_stmt));
  gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));

  return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
}

/* Gimplify the gross structure of other OpenMP worksharing constructs.
   In particular, OMP_SECTIONS and OMP_SINGLE.
   */

static enum gimplify_status
gimplify_omp_workshare (tree *expr_p, tree *pre_p)
{
  tree stmt = *expr_p;

  /* Same shape as gimplify_omp_parallel/gimplify_omp_for: scan clauses
     (pushing a context), gimplify the body, then fix up the clause list
     (popping the context).  */
  gimplify_scan_omp_clauses (&OMP_CLAUSES (stmt), pre_p, false, false);
  gimplify_to_stmt_list (&OMP_BODY (stmt));
  gimplify_adjust_omp_clauses (&OMP_CLAUSES (stmt));

  return GS_ALL_DONE;
}

/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */

static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  while ((TREE_CODE (expr) == NOP_EXPR
	  || TREE_CODE (expr) == CONVERT_EXPR
	  || TREE_CODE (expr) == NON_LVALUE_EXPR)
	 && TREE_OPERAND (expr, 0) != error_mark_node
	 && (TYPE_MAIN_VARIANT (TREE_TYPE (expr))
	     == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (expr, 0)))))
    expr = TREE_OPERAND (expr, 0);

  /* The lhs may appear either as *ADDR or, when ADDR is &OBJ, as OBJ
     itself.  */
  if (TREE_CODE (expr) == INDIRECT_REF && TREE_OPERAND (expr, 0) == addr)
    return true;
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}

/* A subroutine of gimplify_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the size
   of the data type, and thus usable to find the index of the builtin
   decl.  Returns GS_UNHANDLED if the expression is not of the proper
   form.  */

static enum gimplify_status
gimplify_omp_atomic_fetch_op (tree *expr_p, tree addr, tree rhs, int index)
{
  enum built_in_function base;
  tree decl, args, itype;
  enum insn_code *optab;

  /* Check for one of the supported fetch-op operations.  */
  switch (TREE_CODE (rhs))
    {
    case PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      /* NOTE(review): sub is gated on the *add* optab here — presumably
	 because fetch-and-sub can be expanded as fetch-and-add of the
	 negation; confirm against the __sync builtin expanders.  */
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return GS_UNHANDLED;
    }

  /* Make sure the expression is of the proper form: the lhs must be one
     operand of RHS (either one, if the operation commutes).  */
  if (goa_lhs_expr_p (TREE_OPERAND (rhs, 0), addr))
    rhs = TREE_OPERAND (rhs, 1);
  else if (commutative_tree_code (TREE_CODE (rhs))
	   && goa_lhs_expr_p (TREE_OPERAND (rhs, 1), addr))
    rhs = TREE_OPERAND (rhs, 0);
  else
    return GS_UNHANDLED;

  /* base + index + 1 selects the size-specific builtin following the
     generic _N entry — presumably the 1/2/4/8/16 byte variants; verify
     against builtins.def ordering.  */
  decl = built_in_decls[base + index + 1];
  itype = TREE_TYPE (TREE_TYPE (decl));
  /* LLVM LOCAL begin */
#ifdef ENABLE_LLVM
  /* FIXME: Add target specific check.  */
  return GS_UNHANDLED;
#endif
  /* LLVM LOCAL end */
  if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return GS_UNHANDLED;

  args = tree_cons (NULL, fold_convert (itype, rhs), NULL);
  args = tree_cons (NULL, addr, args);
  *expr_p = build_function_call_expr (decl, args);
  return GS_OK;
}

/* A subroutine of gimplify_omp_atomic_pipeline.  Walk *EXPR_P and replace
   appearances of *LHS_ADDR with LHS_VAR.  If an expression does not involve
   the lhs, evaluate it into a temporary.  Return 1 if the lhs appeared as
   a subexpression, 0 if it did not, or -1 if an error was encountered.
   */

static int
goa_stabilize_expr (tree *expr_p, tree *pre_p, tree lhs_addr, tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;

  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  if (is_gimple_val (expr))
    return 0;

  saw_lhs = 0;
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
				     lhs_addr, lhs_var);
      /* Fall through: binary nodes also stabilize operand 0 via the
	 unary case.  */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
				     lhs_addr, lhs_var);
      break;
    default:
      break;
    }

  /* A subexpression not involving the lhs can be pre-evaluated into a
     temporary outside the compare-and-swap loop.  */
  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }

  return saw_lhs;
}

/* A subroutine of gimplify_omp_atomic.  Implement the atomic operation as:

	oldval = *addr;
      repeat:
	newval = rhs;	// with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static enum gimplify_status
gimplify_omp_atomic_pipeline (tree *expr_p, tree *pre_p, tree addr,
			      tree rhs, int index)
{
  tree oldval, oldival, oldival2, newval, newival, label;
  tree type, itype, cmpxchg, args, x, iaddr;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));
  /* LLVM LOCAL begin */
#ifdef ENABLE_LLVM
  /* FIXME: Add target specific check.  */
  return GS_UNHANDLED;
#endif
  /* LLVM LOCAL end */
  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return GS_UNHANDLED;

  oldval = create_tmp_var (type, NULL);
  newval = create_tmp_var (type, NULL);

  /* Precompute as much of RHS as possible.  In the same walk, replace
     occurrences of the lhs value with our temporary.  */
  if (goa_stabilize_expr (&rhs, pre_p, addr, oldval) < 0)
    return GS_ERROR;

  x = build_fold_indirect_ref (addr);
  x = build2 (MODIFY_EXPR, void_type_node, oldval, x);
  gimplify_and_add (x, pre_p);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
    {
      oldival = oldval;
      newival = newval;
      iaddr = addr;
    }
  else
    {
      oldival = create_tmp_var (itype, NULL);
      newival = create_tmp_var (itype, NULL);

      x = build1 (VIEW_CONVERT_EXPR, itype, oldval);
      x = build2 (MODIFY_EXPR, void_type_node, oldival, x);
      gimplify_and_add (x, pre_p);
      iaddr = fold_convert (build_pointer_type (itype), addr);
    }

  /* OLDIVAL2 holds the pre-cmpxchg value for the retry comparison.  */
  oldival2 = create_tmp_var (itype, NULL);

  /* Loop head: "repeat:" in the pseudo-code above.  */
  label = create_artificial_label ();
  x = build1 (LABEL_EXPR, void_type_node, label);
  gimplify_and_add (x, pre_p);

  x = build2 (MODIFY_EXPR, void_type_node, newval, rhs);
  gimplify_and_add (x, pre_p);

  if (newval != newival)
    {
      x = build1 (VIEW_CONVERT_EXPR, itype, newval);
      x = build2 (MODIFY_EXPR, void_type_node, newival, x);
      gimplify_and_add (x, pre_p);
    }

  x = build2 (MODIFY_EXPR, void_type_node, oldival2,
	      fold_convert (itype, oldival));
  gimplify_and_add (x, pre_p);

  args = tree_cons (NULL, fold_convert (itype, newival), NULL);
  args = tree_cons (NULL, fold_convert (itype, oldival), args);
  args = tree_cons (NULL, iaddr, args);
  x = build_function_call_expr (cmpxchg, args);
  if (oldval == oldival)
    x = fold_convert (type, x);
  x = build2 (MODIFY_EXPR, void_type_node, oldival, x);
  gimplify_and_add (x, pre_p);

  /* For floating point, be prepared for the loop backedge.  */
  if (oldval != oldival)
    {
      x = build1 (VIEW_CONVERT_EXPR, type, oldival);
      x = build2 (MODIFY_EXPR, void_type_node, oldval, x);
      gimplify_and_add (x, pre_p);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  x = build3 (COND_EXPR, void_type_node,
	      build2 (NE_EXPR, boolean_type_node, oldival, oldival2),
	      build1 (GOTO_EXPR, void_type_node, label), NULL);
  gimplify_and_add (x, pre_p);

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* A subroutine of gimplify_omp_atomic.  Implement the atomic operation as:

	GOMP_atomic_start ();
	*addr = rhs;
	GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.  */

static enum gimplify_status
gimplify_omp_atomic_mutex (tree *expr_p, tree *pre_p, tree addr, tree rhs)
{
  tree t;

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, pre_p);

  t = build_fold_indirect_ref (addr);
  t = build2 (MODIFY_EXPR, void_type_node, t, rhs);
  gimplify_and_add (t, pre_p);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, pre_p);

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* Gimplify an OMP_ATOMIC statement.  Tries, in order: a specialized
   __sync_fetch_and_op builtin, a compare-and-swap loop, and finally a
   GOMP mutex pair.  */

static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, tree *pre_p)
{
  tree addr = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes: INDEX becomes
     log2 of the byte size, so 0..4 covers 1 to 16 bytes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      enum gimplify_status gs;
      unsigned int align;

      if (DECL_P (TREE_OPERAND (addr, 0)))
	align = DECL_ALIGN_UNIT (TREE_OPERAND (addr, 0));
      else if (TREE_CODE (TREE_OPERAND (addr, 0)) == COMPONENT_REF
	       && TREE_CODE (TREE_OPERAND (TREE_OPERAND (addr, 0), 1))
		  == FIELD_DECL)
	align = DECL_ALIGN_UNIT (TREE_OPERAND (TREE_OPERAND (addr, 0), 1));
      else
	align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.  */
	  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	    {
	      gs = gimplify_omp_atomic_fetch_op (expr_p, addr, rhs, index);
	      if (gs != GS_UNHANDLED)
		return gs;
	    }

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  gs = gimplify_omp_atomic_pipeline (expr_p, pre_p, addr, rhs, index);
	  if (gs != GS_UNHANDLED)
	    return gs;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  return gimplify_omp_atomic_mutex (expr_p, pre_p, addr, rhs);
}

/* Gimplifies the expression tree pointed to by EXPR_P.  Return 0 if
   gimplification failed.

   PRE_P points to the list where side effects that must happen before
   EXPR should be stored.

   POST_P points to the list where side effects that must happen after
   EXPR should be stored, or NULL if there is no suitable list.  In
   that case, we copy the result to a temporary, emit the post-effects,
   and then return the temporary.

   GIMPLE_TEST_F points to a function that takes a tree T and returns
   nonzero if T is in the GIMPLE form requested by the caller.  The
   GIMPLE predicates are in tree-gimple.c.

   This test is used twice.  Before gimplification, the test is invoked
   to determine whether *EXPR_P is already gimple enough.  If that fails,
   *EXPR_P is gimplified according to its code and GIMPLE_TEST_F is
   called again.
If the test still fails, then a new temporary variable is created and assigned the value of the gimplified expression. FALLBACK tells the function what sort of a temporary we want. If the 1 bit is set, an rvalue is OK. If the 2 bit is set, an lvalue is OK. If both are set, either is OK, but an lvalue is preferable. The return value is either GS_ERROR or GS_ALL_DONE, since this function iterates until solution. */ enum gimplify_status gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool (* gimple_test_f) (tree), fallback_t fallback) { tree tmp; tree internal_pre = NULL_TREE; tree internal_post = NULL_TREE; tree save_expr; int is_statement = (pre_p == NULL); location_t saved_location; enum gimplify_status ret; save_expr = *expr_p; if (save_expr == NULL_TREE) return GS_ALL_DONE; /* We used to check the predicate here and return immediately if it succeeds. This is wrong; the design is for gimplification to be idempotent, and for the predicates to only test for valid forms, not whether they are fully simplified. */ /* Set up our internal queues if needed. */ if (pre_p == NULL) pre_p = &internal_pre; if (post_p == NULL) post_p = &internal_post; saved_location = input_location; if (save_expr != error_mark_node && EXPR_HAS_LOCATION (*expr_p)) input_location = EXPR_LOCATION (*expr_p); /* Loop over the specific gimplifiers until the toplevel node remains the same. */ do { /* Strip away as many useless type conversions as possible at the toplevel. */ STRIP_USELESS_TYPE_CONVERSION (*expr_p); /* Remember the expr. */ save_expr = *expr_p; /* Die, die, die, my darling. */ if (save_expr == error_mark_node || (TREE_TYPE (save_expr) && TREE_TYPE (save_expr) == error_mark_node)) { ret = GS_ERROR; break; } /* Do any language-specific gimplification. 
*/ ret = lang_hooks.gimplify_expr (expr_p, pre_p, post_p); if (ret == GS_OK) { if (*expr_p == NULL_TREE) break; if (*expr_p != save_expr) continue; } else if (ret != GS_UNHANDLED) break; ret = GS_OK; switch (TREE_CODE (*expr_p)) { /* First deal with the special cases. */ case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: ret = gimplify_self_mod_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case ARRAY_REF: /* LLVM LOCAL begin */ #ifdef ENABLE_LLVM /* Handle the LLVM "ARRAY_REF with pointer base" extension by treating pointer-based ARRAY_REFs as binary expressions. */ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (*expr_p, 0))) != ARRAY_TYPE) { /* LLVM LOCAL 8004649 */ gimplify_type_sizes (TREE_TYPE (*expr_p), pre_p); goto expr_2; } #endif /* LLVM LOCAL end */ case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case VIEW_CONVERT_EXPR: ret = gimplify_compound_lval (expr_p, pre_p, post_p, fallback ? fallback : fb_rvalue); break; case COND_EXPR: ret = gimplify_cond_expr (expr_p, pre_p, fallback); /* C99 code may assign to an array in a structure value of a conditional expression, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } break; case CALL_EXPR: ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none); /* C99 code may assign to an array in a structure returned from a function, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. 
*/ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } break; case TREE_LIST: gcc_unreachable (); case COMPOUND_EXPR: ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none); break; case MODIFY_EXPR: case INIT_EXPR: ret = gimplify_modify_expr (expr_p, pre_p, post_p, fallback != fb_none); /* The distinction between MODIFY_EXPR and INIT_EXPR is no longer useful. */ if (*expr_p && TREE_CODE (*expr_p) == INIT_EXPR) TREE_SET_CODE (*expr_p, MODIFY_EXPR); break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: ret = gimplify_boolean_expr (expr_p); break; case TRUTH_NOT_EXPR: TREE_OPERAND (*expr_p, 0) = gimple_boolify (TREE_OPERAND (*expr_p, 0)); ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case ADDR_EXPR: ret = gimplify_addr_expr (expr_p, pre_p, post_p); break; case VA_ARG_EXPR: ret = gimplify_va_arg_expr (expr_p, pre_p, post_p); break; case CONVERT_EXPR: case NOP_EXPR: if (IS_EMPTY_STMT (*expr_p)) { ret = GS_ALL_DONE; break; } if (VOID_TYPE_P (TREE_TYPE (*expr_p)) || fallback == fb_none) { /* Just strip a conversion to void (or in void context) and try again. */ *expr_p = TREE_OPERAND (*expr_p, 0); break; } ret = gimplify_conversion (expr_p); if (ret == GS_ERROR) break; if (*expr_p != save_expr) break; /* FALLTHRU */ case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: /* unary_expr: ... | '(' cast ')' val | ... */ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case INDIRECT_REF: *expr_p = fold_indirect_ref (*expr_p); if (*expr_p != save_expr) break; /* else fall through. 
*/ case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_reg, fb_rvalue); recalculate_side_effects (*expr_p); break; /* Constants need not be gimplified. */ case INTEGER_CST: case REAL_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: ret = GS_ALL_DONE; break; case CONST_DECL: /* If we require an lvalue, such as for ADDR_EXPR, retain the CONST_DECL node. Otherwise the decl is replaceable by its value. */ /* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */ if (fallback & fb_lvalue) ret = GS_ALL_DONE; else *expr_p = DECL_INITIAL (*expr_p); break; case DECL_EXPR: ret = gimplify_decl_expr (expr_p); break; case EXC_PTR_EXPR: /* FIXME make this a decl. */ ret = GS_ALL_DONE; break; case BIND_EXPR: ret = gimplify_bind_expr (expr_p, pre_p); break; case LOOP_EXPR: ret = gimplify_loop_expr (expr_p, pre_p); break; case SWITCH_EXPR: ret = gimplify_switch_expr (expr_p, pre_p); break; case EXIT_EXPR: ret = gimplify_exit_expr (expr_p); break; case GOTO_EXPR: /* If the target is not LABEL, then it is a computed jump and the target needs to be gimplified. */ if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL) ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p, NULL, is_gimple_val, fb_rvalue); break; case LABEL_EXPR: ret = GS_ALL_DONE; gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p)) == current_function_decl); break; case CASE_LABEL_EXPR: ret = gimplify_case_label_expr (expr_p); break; case RETURN_EXPR: ret = gimplify_return_expr (*expr_p, pre_p); break; case CONSTRUCTOR: /* Don't reduce this in place; let gimplify_init_constructor work its magic. Buf if we're just elaborating this for side effects, just gimplify any element that has side-effects. 
*/ if (fallback == fb_none) { unsigned HOST_WIDE_INT ix; constructor_elt *ce; tree temp = NULL_TREE; for (ix = 0; VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (*expr_p), ix, ce); ix++) if (TREE_SIDE_EFFECTS (ce->value)) append_to_statement_list (ce->value, &temp); *expr_p = temp; ret = GS_OK; } /* C99 code may assign to an array in a constructed structure or union, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ else if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } else ret = GS_ALL_DONE; break; /* The following are special cases that are not handled by the original GIMPLE grammar. */ /* SAVE_EXPR nodes are converted into a GIMPLE identifier and eliminated. */ case SAVE_EXPR: ret = gimplify_save_expr (expr_p, pre_p, post_p); break; case BIT_FIELD_REF: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_lvalue, fb_either); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, MIN (r1, r2)); } break; case NON_LVALUE_EXPR: /* This should have been stripped above. 
*/ gcc_unreachable (); case ASM_EXPR: ret = gimplify_asm_expr (expr_p, pre_p, post_p); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 0)); gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 1)); ret = GS_ALL_DONE; break; case CLEANUP_POINT_EXPR: ret = gimplify_cleanup_point_expr (expr_p, pre_p); break; case TARGET_EXPR: ret = gimplify_target_expr (expr_p, pre_p, post_p); break; case CATCH_EXPR: gimplify_to_stmt_list (&CATCH_BODY (*expr_p)); ret = GS_ALL_DONE; break; case EH_FILTER_EXPR: gimplify_to_stmt_list (&EH_FILTER_FAILURE (*expr_p)); ret = GS_ALL_DONE; break; case OBJ_TYPE_REF: { enum gimplify_status r0, r1; r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); } break; case LABEL_DECL: /* We get here when taking the address of a label. We mark the label as "forced"; meaning it can never be removed and it is a potential target for any computed goto. */ FORCED_LABEL (*expr_p) = 1; ret = GS_ALL_DONE; break; case STATEMENT_LIST: ret = gimplify_statement_list (expr_p, pre_p); break; case WITH_SIZE_EXPR: { gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p == &internal_post ? NULL : post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); } break; case VAR_DECL: case PARM_DECL: ret = gimplify_var_or_parm_decl (expr_p); break; case RESULT_DECL: /* When within an OpenMP context, notice uses of variables. */ if (gimplify_omp_ctxp) omp_notice_variable (gimplify_omp_ctxp, *expr_p, true); ret = GS_ALL_DONE; break; case SSA_NAME: /* Allow callbacks into the gimplifier during optimization. 
*/ ret = GS_ALL_DONE; break; case OMP_PARALLEL: ret = gimplify_omp_parallel (expr_p, pre_p); break; case OMP_FOR: ret = gimplify_omp_for (expr_p, pre_p); break; case OMP_SECTIONS: case OMP_SINGLE: ret = gimplify_omp_workshare (expr_p, pre_p); break; case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: gimplify_to_stmt_list (&OMP_BODY (*expr_p)); break; case OMP_ATOMIC: ret = gimplify_omp_atomic (expr_p, pre_p); break; case OMP_RETURN: case OMP_CONTINUE: ret = GS_ALL_DONE; break; default: switch (TREE_CODE_CLASS (TREE_CODE (*expr_p))) { case tcc_comparison: /* Handle comparison of objects of non scalar mode aggregates with a call to memcmp. It would be nice to only have to do this for variable-sized objects, but then we'd have to allow the same nest of reference nodes we allow for MODIFY_EXPR and that's too complex. Compare scalar mode aggregates as scalar mode values. Using memcmp for them would be very inefficient at best, and is plain wrong if bitfields are involved. */ { tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1)); if (!AGGREGATE_TYPE_P (type)) goto expr_2; else if (TYPE_MODE (type) != BLKmode) ret = gimplify_scalar_mode_aggregate_compare (expr_p); else ret = gimplify_variable_sized_compare (expr_p); break; } /* If *EXPR_P does not need to be special-cased, handle it according to its class. 
*/ case tcc_unary: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); break; case tcc_binary: expr_2: { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); break; } case tcc_declaration: case tcc_constant: ret = GS_ALL_DONE; goto dont_recalculate; default: gcc_assert (TREE_CODE (*expr_p) == TRUTH_AND_EXPR || TREE_CODE (*expr_p) == TRUTH_OR_EXPR || TREE_CODE (*expr_p) == TRUTH_XOR_EXPR); goto expr_2; } recalculate_side_effects (*expr_p); dont_recalculate: break; } /* If we replaced *expr_p, gimplify again. */ if (ret == GS_OK && (*expr_p == NULL || *expr_p == save_expr)) ret = GS_ALL_DONE; } while (ret == GS_OK); /* If we encountered an error_mark somewhere nested inside, either stub out the statement or propagate the error back out. */ if (ret == GS_ERROR) { if (is_statement) *expr_p = NULL; goto out; } /* This was only valid as a return value from the langhook, which we handled. Make sure it doesn't escape from any other context. */ gcc_assert (ret != GS_UNHANDLED); if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p)) { /* We aren't looking for a value, and we don't have a valid statement. If it doesn't have side-effects, throw it away. */ if (!TREE_SIDE_EFFECTS (*expr_p)) *expr_p = NULL; else if (!TREE_THIS_VOLATILE (*expr_p)) { /* This is probably a _REF that contains something nested that has side effects. Recurse through the operands to find it. 
*/ enum tree_code code = TREE_CODE (*expr_p); switch (code) { case COMPONENT_REF: case REALPART_EXPR: case IMAGPART_EXPR: case VIEW_CONVERT_EXPR: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); break; case ARRAY_REF: case ARRAY_RANGE_REF: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, gimple_test_f, fallback); break; default: /* Anything else with side-effects must be converted to a valid statement before we get here. */ gcc_unreachable (); } *expr_p = NULL; } else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p)) && TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode) { /* Historically, the compiler has treated a bare reference to a non-BLKmode volatile lvalue as forcing a load. */ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p)); /* Normally, we do not want to create a temporary for a TREE_ADDRESSABLE type because such a type should not be copied by bitwise-assignment. However, we make an exception here, as all we are doing here is ensuring that we read the bytes that make up the type. We use create_tmp_var_raw because create_tmp_var will abort when given a TREE_ADDRESSABLE type. */ tree tmp = create_tmp_var_raw (type, "vol"); gimple_add_tmp_var (tmp); *expr_p = build2 (MODIFY_EXPR, type, tmp, *expr_p); } else /* We can't do anything useful with a volatile reference to an incomplete type, so just throw it away. Likewise for a BLKmode type, since any implicit inner load should already have been turned into an explicit one by the gimplification process. */ *expr_p = NULL; } /* If we are gimplifying at the statement level, we're done. Tack everything together and replace the original statement with the gimplified form. 
*/ if (fallback == fb_none || is_statement) { if (internal_pre || internal_post) { append_to_statement_list (*expr_p, &internal_pre); append_to_statement_list (internal_post, &internal_pre); annotate_all_with_locus (&internal_pre, input_location); *expr_p = internal_pre; } else if (!*expr_p) ; else if (TREE_CODE (*expr_p) == STATEMENT_LIST) annotate_all_with_locus (expr_p, input_location); else annotate_one_with_locus (*expr_p, input_location); goto out; } /* Otherwise we're gimplifying a subexpression, so the resulting value is interesting. */ /* If it's sufficiently simple already, we're done. Unless we are handling some post-effects internally; if that's the case, we need to copy into a temp before adding the post-effects to the tree. */ if (!internal_post && (*gimple_test_f) (*expr_p)) goto out; /* Otherwise, we need to create a new temporary for the gimplified expression. */ /* We can't return an lvalue if we have an internal postqueue. The object the lvalue refers to would (probably) be modified by the postqueue; we need to copy the value out first, which means an rvalue. */ if ((fallback & fb_lvalue) && !internal_post && is_gimple_addressable (*expr_p)) { /* An lvalue will do. Take the address of the expression, store it in a temporary, and replace the expression with an INDIRECT_REF of that temporary. */ tmp = build_fold_addr_expr (*expr_p); gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue); *expr_p = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (tmp)), tmp); } else if ((fallback & fb_rvalue) && is_gimple_formal_tmp_rhs (*expr_p)) { gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p))); /* An rvalue will do. Assign the gimplified expression into a new temporary TMP and replace the original expression with TMP. */ if (internal_post || (fallback & fb_lvalue)) /* The postqueue might change the value of the expression between the initialization and use of the temporary, so we can't use a formal temp. FIXME do we care? 
*/ *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); else *expr_p = get_formal_tmp_var (*expr_p, pre_p); if (TREE_CODE (*expr_p) != SSA_NAME) DECL_GIMPLE_FORMAL_TEMP_P (*expr_p) = 1; } else { #ifdef ENABLE_CHECKING if (!(fallback & fb_mayfail)) { fprintf (stderr, "gimplification failed:\n"); print_generic_expr (stderr, *expr_p, 0); debug_tree (*expr_p); internal_error ("gimplification failed"); } #endif gcc_assert (fallback & fb_mayfail); /* If this is an asm statement, and the user asked for the impossible, don't die. Fail and let gimplify_asm_expr issue an error. */ ret = GS_ERROR; goto out; } /* Make sure the temporary matches our predicate. */ gcc_assert ((*gimple_test_f) (*expr_p)); if (internal_post) { annotate_all_with_locus (&internal_post, input_location); append_to_statement_list (internal_post, pre_p); } out: input_location = saved_location; return ret; } /* Look through TYPE for variable-sized objects and gimplify each such size that we find. Add to LIST_P any statements generated. */ void gimplify_type_sizes (tree type, tree *list_p) { tree field, t; if (type == NULL || type == error_mark_node) return; /* We first do the main variant, then copy into any other variants. */ type = TYPE_MAIN_VARIANT (type); /* Avoid infinite recursion. */ if (TYPE_SIZES_GIMPLIFIED (type)) return; TYPE_SIZES_GIMPLIFIED (type) = 1; switch (TREE_CODE (type)) { case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case REAL_TYPE: gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p); gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p); for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type); TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type); } break; case ARRAY_TYPE: /* These types may not have declarations, so handle them here. 
*/ gimplify_type_sizes (TREE_TYPE (type), list_p); gimplify_type_sizes (TYPE_DOMAIN (type), list_p); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) { gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p); gimplify_type_sizes (TREE_TYPE (field), list_p); } break; case POINTER_TYPE: case REFERENCE_TYPE: /* We used to recurse on the pointed-to type here, which turned out to be incorrect because its definition might refer to variables not yet initialized at this point if a forward declaration is involved. It was actually useful for anonymous pointed-to types to ensure that the sizes evaluation dominates every possible later use of the values. Restricting to such types here would be safe since there is no possible forward declaration around, but would introduce an undesirable middle-end semantic to anonymity. We then defer to front-ends the responsibility of ensuring that the sizes are evaluated both early and late enough, e.g. by attaching artificial type declarations to the tree. */ break; default: break; } gimplify_one_sizepos (&TYPE_SIZE (type), list_p); gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p); for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { TYPE_SIZE (t) = TYPE_SIZE (type); TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type); TYPE_SIZES_GIMPLIFIED (t) = 1; } } /* A subroutine of gimplify_type_sizes to make sure that *EXPR_P, a size or position, has had all of its SAVE_EXPRs evaluated. We add any required statements to STMT_P. */ void gimplify_one_sizepos (tree *expr_p, tree *stmt_p) { tree type, expr = *expr_p; /* We don't do anything if the value isn't there, is constant, or contains A PLACEHOLDER_EXPR. We also don't want to do anything if it's already a VAR_DECL. 
If it's a VAR_DECL from another function, the gimplifier will want to replace it with a new variable, but that will cause problems if this type is from outside the function. It's OK to have that here. */ if (expr == NULL_TREE || TREE_CONSTANT (expr) || TREE_CODE (expr) == VAR_DECL || CONTAINS_PLACEHOLDER_P (expr)) return; type = TREE_TYPE (expr); *expr_p = unshare_expr (expr); gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue); expr = *expr_p; /* Verify that we've an exact type match with the original expression. In particular, we do not wish to drop a "sizetype" in favour of a type of similar dimensions. We don't want to pollute the generic type-stripping code with this knowledge because it doesn't matter for the bulk of GENERIC/GIMPLE. It only matters that TYPE_SIZE_UNIT and friends retain their "sizetype-ness". */ if (TREE_TYPE (expr) != type && TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)) { tree tmp; *expr_p = create_tmp_var (type, NULL); tmp = build1 (NOP_EXPR, type, expr); tmp = build2 (MODIFY_EXPR, type, *expr_p, tmp); if (EXPR_HAS_LOCATION (expr)) SET_EXPR_LOCUS (tmp, EXPR_LOCUS (expr)); else SET_EXPR_LOCATION (tmp, input_location); gimplify_and_add (tmp, stmt_p); } } #ifdef ENABLE_CHECKING /* Compare types A and B for a "close enough" match. */ static bool cpt_same_type (tree a, tree b) { if (lang_hooks.types_compatible_p (a, b)) return true; /* ??? The C++ FE decomposes METHOD_TYPES to FUNCTION_TYPES and doesn't link them together. This routine is intended to catch type errors that will affect the optimizers, and the optimizers don't add new dereferences of function pointers, so ignore it. */ if ((TREE_CODE (a) == FUNCTION_TYPE || TREE_CODE (a) == METHOD_TYPE) && (TREE_CODE (b) == FUNCTION_TYPE || TREE_CODE (b) == METHOD_TYPE)) return true; /* ??? The C FE pushes type qualifiers after the fact into the type of the element from the type of the array. See build_unary_op's handling of ADDR_EXPR. 
This seems wrong -- if we were going to do this, we should have done it when creating the variable in the first place. Alternately, why aren't the two array types made variants? */ if (TREE_CODE (a) == ARRAY_TYPE && TREE_CODE (b) == ARRAY_TYPE) return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b)); /* And because of those, we have to recurse down through pointers. */ if (POINTER_TYPE_P (a) && POINTER_TYPE_P (b)) return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b)); return false; } /* Check for some cases of the front end missing cast expressions. The type of a dereference should correspond to the pointer type; similarly the type of an address should match its object. */ static tree check_pointer_types_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { tree t = *tp; tree ptype, otype, dtype; switch (TREE_CODE (t)) { case INDIRECT_REF: case ARRAY_REF: otype = TREE_TYPE (t); ptype = TREE_TYPE (TREE_OPERAND (t, 0)); dtype = TREE_TYPE (ptype); gcc_assert (cpt_same_type (otype, dtype)); break; case ADDR_EXPR: ptype = TREE_TYPE (t); otype = TREE_TYPE (TREE_OPERAND (t, 0)); dtype = TREE_TYPE (ptype); if (!cpt_same_type (otype, dtype)) { /* &array is allowed to produce a pointer to the element, rather than a pointer to the array type. We must allow this in order to properly represent assigning the address of an array in C into pointer to the element type. */ gcc_assert (TREE_CODE (otype) == ARRAY_TYPE && POINTER_TYPE_P (ptype) && cpt_same_type (TREE_TYPE (otype), dtype)); break; } break; default: return NULL_TREE; } return NULL_TREE; } #endif /* Gimplify the body of statements pointed to by BODY_P. FNDECL is the function decl containing BODY. */ void gimplify_body (tree *body_p, tree fndecl, bool do_parms) { location_t saved_location = input_location; tree body, parm_stmts; timevar_push (TV_TREE_GIMPLIFY); gcc_assert (gimplify_ctxp == NULL); push_gimplify_context (); /* Unshare most shared trees in the body and in that of any nested functions. 
It would seem we don't have to do this for nested functions because they are supposed to be output and then the outer function gimplified first, but the g++ front end doesn't always do it that way. */ unshare_body (body_p, fndecl); unvisit_body (body_p, fndecl); /* Make sure input_location isn't set to something wierd. */ input_location = DECL_SOURCE_LOCATION (fndecl); /* Resolve callee-copies. This has to be done before processing the body so that DECL_VALUE_EXPR gets processed correctly. */ parm_stmts = do_parms ? gimplify_parameters () : NULL; /* Gimplify the function's body. */ gimplify_stmt (body_p); body = *body_p; if (!body) body = alloc_stmt_list (); else if (TREE_CODE (body) == STATEMENT_LIST) { tree t = expr_only (*body_p); if (t) body = t; } /* If there isn't an outer BIND_EXPR, add one. */ if (TREE_CODE (body) != BIND_EXPR) { tree b = build3 (BIND_EXPR, void_type_node, NULL_TREE, NULL_TREE, NULL_TREE); TREE_SIDE_EFFECTS (b) = 1; append_to_statement_list_force (body, &BIND_EXPR_BODY (b)); body = b; } /* If we had callee-copies statements, insert them at the beginning of the function. */ if (parm_stmts) { append_to_statement_list_force (BIND_EXPR_BODY (body), &parm_stmts); BIND_EXPR_BODY (body) = parm_stmts; } /* Unshare again, in case gimplification was sloppy. */ unshare_all_trees (body); *body_p = body; pop_gimplify_context (body); gcc_assert (gimplify_ctxp == NULL); #ifdef ENABLE_CHECKING walk_tree (body_p, check_pointer_types_r, NULL, NULL); #endif timevar_pop (TV_TREE_GIMPLIFY); input_location = saved_location; } /* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL node for the function we want to gimplify. 
*/

void
gimplify_function_tree (tree fndecl)
{
  tree oldfn, parm, ret;

  /* Temporarily make FNDECL the current function so the gimplifier
     allocates temporaries and context in the right place; the previous
     function is restored at the end.  */
  oldfn = current_function_decl;
  current_function_decl = fndecl;
  cfun = DECL_STRUCT_FUNCTION (fndecl);
  if (cfun == NULL)
    allocate_struct_function (fndecl);

  for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = TREE_CHAIN (parm))
    {
      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if (TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE
	  && !TREE_THIS_VOLATILE (parm)
	  && !needs_to_live_in_memory (parm))
	DECL_COMPLEX_GIMPLE_REG_P (parm) = 1;
    }

  /* The result decl gets the same register-promotion treatment as the
     parameters above.  */
  ret = DECL_RESULT (fndecl);
  if (TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE
      && !needs_to_live_in_memory (ret))
    DECL_COMPLEX_GIMPLE_REG_P (ret) = 1;

  gimplify_body (&DECL_SAVED_TREE (fndecl), fndecl, true);

  /* If we're instrumenting function entry/exit, then prepend the call to
     the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
     catch the exit hook.  */
  /* ??? Add some way to ignore exceptions for this TFE.  */
  if (flag_instrument_function_entry_exit
      && ! DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl))
    {
      tree tf, x, bind;

      /* The original body becomes the "try" part and the exit hook the
	 "finally" part, so the exit hook runs on every path out.  */
      tf = build2 (TRY_FINALLY_EXPR, void_type_node, NULL, NULL);
      TREE_SIDE_EFFECTS (tf) = 1;
      x = DECL_SAVED_TREE (fndecl);
      append_to_statement_list (x, &TREE_OPERAND (tf, 0));
      x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_EXIT];
      x = build_function_call_expr (x, NULL);
      append_to_statement_list (x, &TREE_OPERAND (tf, 1));

      /* Wrap everything in a fresh BIND_EXPR: entry hook first, then
	 the try/finally constructed above.  */
      bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
      TREE_SIDE_EFFECTS (bind) = 1;
      x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_ENTER];
      x = build_function_call_expr (x, NULL);
      append_to_statement_list (x, &BIND_EXPR_BODY (bind));
      append_to_statement_list (tf, &BIND_EXPR_BODY (bind));
      DECL_SAVED_TREE (fndecl) = bind;
    }

  /* Restore the previously current function.  */
  current_function_decl = oldfn;
  cfun = oldfn ? DECL_STRUCT_FUNCTION (oldfn) : NULL;
}

/* Expands EXPR to list of gimple statements STMTS.
   If SIMPLE is true, force the result to be either ssa_name or an
   invariant, otherwise just force it to be a rhs expression.

   If VAR is not NULL, make the base variable of the final destination
   be VAR if suitable.  */

tree
force_gimple_operand (tree expr, tree *stmts, bool simple, tree var)
{
  tree t;
  enum gimplify_status ret;
  gimple_predicate gimple_test_f;

  *stmts = NULL_TREE;

  /* Already a gimple value: nothing to do, no statements needed.  */
  if (is_gimple_val (expr))
    return expr;

  gimple_test_f = simple ? is_gimple_val : is_gimple_reg_rhs;

  push_gimplify_context ();
  gimplify_ctxp->into_ssa = in_ssa_p;

  /* If a destination was requested, gimplify an assignment so the
     final value ends up based in VAR.  */
  if (var)
    expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr);

  ret = gimplify_expr (&expr, stmts, NULL,
		       gimple_test_f, fb_rvalue);
  gcc_assert (ret != GS_ERROR);

  /* Register any temporaries created during gimplification so later
     passes know about them.  */
  if (referenced_vars)
    {
      for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t))
	add_referenced_var (t);
    }

  pop_gimplify_context (NULL);

  return expr;
}

/* Invokes force_gimple_operand for EXPR with parameters SIMPLE_P and VAR.  If
   some statements are produced, emits them before BSI.  */

tree
force_gimple_operand_bsi (block_stmt_iterator *bsi, tree expr,
			  bool simple_p, tree var)
{
  tree stmts;

  expr = force_gimple_operand (expr, &stmts, simple_p, var);

  /* Any setup statements produced must execute before the use site.  */
  if (stmts)
    bsi_insert_before (bsi, stmts, BSI_SAME_STMT);

  return expr;
}

#include "gt-gimplify.h"
GB_binop__isle_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_01__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_03__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int16) // A*D function (colscale): GB (_AxD__isle_int16) // D*A function (rowscale): GB (_DxB__isle_int16) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int16) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int16) // C=scalar+B GB (_bind1st__isle_int16) // C=scalar+B' GB (_bind1st_tran__isle_int16) // C=A+scalar GB (_bind2nd__isle_int16) // C=A'+scalar GB (_bind2nd_tran__isle_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT16 || GxB_NO_ISLE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
coriolis.c
/*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/

/*
In this file, everything that is needed for calculating the vorticity flux term is prepared.
*/

#include <stdlib.h>
#include <stdio.h>
#include <geos95.h>
#include "../../src/game_types.h"
#include "../../src/game_constants.h"
#include "include.h"

/*
Computes, for every horizontal edge i, the ten reconstruction indices
(trsk_indices), their weights (trsk_weights) and the resorted indices used
for the modified curl (trsk_modified_curl_indices).  Results are written
into the three output arrays, ten slots per edge.  Returns 0 on success;
any internal consistency failure prints a "Problem N" message and exits.
*/
int coriolis(int from_index_dual[], int to_index_dual[], int trsk_modified_curl_indices[], double normal_distance[], double normal_distance_dual[],
int to_index[], double area[], double z_scalar[], double latitude_scalar[], double longitude_scalar[], double latitude_vector[], double longitude_vector[],
double latitude_scalar_dual[], double longitude_scalar_dual[], double trsk_weights[], int trsk_indices[], int from_index[], int adjacent_vector_indices_h[],
double z_vector[], double z_vector_dual[])
{
    /*
    This function implements the modified TRSK scheme proposed by Gassmann (2018).
    Indices and weights are computed here for the highest layer but remain unchanged elsewhere.
    */
    int offset, sign_0, sign_1, no_of_edges, index_offset, vertex_index_candidate_0, vertex_index_candidate_1, counter, check_result, first_index, last_index;
    double check_sum, triangle_0, triangle_1, sum_of_weights;
    // factors rescaling horizontal lengths/areas from the top vector level to the top scalar level
    double rescale_for_z_offset_1d = (RADIUS + z_scalar[0])/(RADIUS + z_vector[0]);
    double rescale_for_z_offset_2d = pow(rescale_for_z_offset_1d, 2);
    // loop over all edges
    #pragma omp parallel for private(offset, sign_0, sign_1, no_of_edges, index_offset, vertex_index_candidate_0, vertex_index_candidate_1, counter, check_result, first_index, last_index, check_sum, triangle_0, triangle_1, sum_of_weights)
    for (int i = 0; i < NO_OF_VECTORS_H; ++i)
    {
        /*
        translation from TRSK paper (Thuburn et al., 2009):
        sign_0: t_{e, v_2}
        sign_1: n_{e', i}
        trsk_weights: w
        */
        int *from_or_to_index = malloc(NO_OF_VECTORS_H*sizeof(int));
        offset = 0;
        first_index = -1;
        last_index = -1;
        sum_of_weights = 0;
        // loop over all edges that are relevant for the reconstruction
        // (slots 0-4 belong to the from cell, slots 5-9 to the to cell)
        for (int k = 0; k < 10; ++k)
        {
            if (k == 0 || k == 5)
            {
                offset = 0;
            }
            if (k < 5)
            {
                index_offset = 0;
                sign_0 = -1;
                for (int l = 0; l < NO_OF_VECTORS_H; ++l)
                {
                    from_or_to_index[l] = from_index[l];
                }
            }
            else
            {
                index_offset = 5;
                sign_0 = 1;
                for (int l = 0; l < NO_OF_VECTORS_H; ++l)
                {
                    from_or_to_index[l] = to_index[l];
                }
            }
            // skip the edge i itself among the cell's adjacent edges
            if (adjacent_vector_indices_h[6*from_or_to_index[i] + k - index_offset] == i)
            {
                offset += 1;
            }
            if (offset > 1)
            {
                printf("Problem 1 in TRSK implementation detected.\n");
                exit(1);
            }
            trsk_indices[10*i + k] = adjacent_vector_indices_h[6*from_or_to_index[i] + k - index_offset + offset];
            // -1 marks a non-existent edge (pentagonal cell): weight is zero
            if (trsk_indices[10*i + k] == -1)
            {
                trsk_weights[10*i + k] = 0;
            }
            else
            {
                // setting sign 1
                sign_1 = -1;
                if (from_index[trsk_indices[10*i + k]] == from_or_to_index[i])
                {
                    sign_1 = 1;
                }
                // determining whether the cell is pentagonal or hexagonal
                if (from_or_to_index[i] < NO_OF_PENTAGONS)
                {
                    no_of_edges = 5;
                }
                else
                {
                    no_of_edges = 6;
                }
                // declaring some arrays we need
                int vertex_indices[no_of_edges];
                int edge_indices[no_of_edges];
                int indices_resorted[no_of_edges];
                int vertex_indices_resorted[no_of_edges];
                double latitude_vertices[no_of_edges];
                double longitude_vertices[no_of_edges];
                double latitude_edges[no_of_edges];
                double longitude_edges[no_of_edges];
                double vector_of_areas[no_of_edges];
                // finding the vertex indices of the cell
                // initializing with impossible values
                for (int l = 0; l < no_of_edges; ++l)
                {
                    vertex_indices[l] = -1;
                }
                counter = 0;
                for (int l = 0; l < no_of_edges; ++l)
                {
                    vertex_index_candidate_0 = from_index_dual[adjacent_vector_indices_h[6*from_or_to_index[i] + l]];
                    vertex_index_candidate_1 = to_index_dual[adjacent_vector_indices_h[6*from_or_to_index[i] + l]];
                    check_result = in_bool_calculator(vertex_index_candidate_0, vertex_indices, no_of_edges);
                    if (check_result == 0)
                    {
                        vertex_indices[counter] = vertex_index_candidate_0;
                        latitude_vertices[counter] = latitude_scalar_dual[vertex_indices[counter]];
                        longitude_vertices[counter] = longitude_scalar_dual[vertex_indices[counter]];
                        ++counter;
                    }
                    check_result = in_bool_calculator(vertex_index_candidate_1, vertex_indices, no_of_edges);
                    if (check_result == 0)
                    {
                        vertex_indices[counter] = vertex_index_candidate_1;
                        latitude_vertices[counter] = latitude_scalar_dual[vertex_indices[counter]];
                        longitude_vertices[counter] = longitude_scalar_dual[vertex_indices[counter]];
                        ++counter;
                    }
                }
                // checking whether all vertices have been found
                if (counter != no_of_edges)
                {
                    printf("Problem 13 in TRSK implementation detected.\n");
                    exit(1);
                }
                // sorting the vertices in counter-clockwise direction
                sort_edge_indices(latitude_vertices, longitude_vertices, no_of_edges, indices_resorted);
                for (int l = 0; l < no_of_edges; ++l)
                {
                    vertex_indices_resorted[l] = vertex_indices[indices_resorted[l]];
                }
                // sorting the edges in counter-clockwise direction
                // (edge l connects resorted vertices l and l+1)
                for (int l = 0; l < no_of_edges; ++l)
                {
                    for (int m = 0; m < no_of_edges; ++m)
                    {
                        if ((from_index_dual[adjacent_vector_indices_h[6*from_or_to_index[i] + m]] == vertex_indices_resorted[l]
                        && to_index_dual[adjacent_vector_indices_h[6*from_or_to_index[i] + m]] == vertex_indices_resorted[(l + 1)%no_of_edges])
                        || (to_index_dual[adjacent_vector_indices_h[6*from_or_to_index[i] + m]] == vertex_indices_resorted[l]
                        && from_index_dual[adjacent_vector_indices_h[6*from_or_to_index[i] + m]] == vertex_indices_resorted[(l + 1)%no_of_edges]))
                        {
                            edge_indices[l] = adjacent_vector_indices_h[6*from_or_to_index[i] + m];
                        }
                    }
                }
                for (int l = 0; l < no_of_edges; ++l)
                {
                    latitude_edges[l] = latitude_vector[edge_indices[l]];
                    longitude_edges[l] = longitude_vector[edge_indices[l]];
                }
                // kite areas: each vertex contributes two spherical triangles
                // (cell center, vertex, adjacent edge midpoints)
                check_sum = 0;
                for (int l = 0; l < no_of_edges; ++l)
                {
                    if (l == 0)
                    {
                        triangle_0 = calc_triangle_area(latitude_scalar[from_or_to_index[i]], longitude_scalar[from_or_to_index[i]],
                        latitude_vertices[indices_resorted[l]], longitude_vertices[indices_resorted[l]],
                        latitude_edges[no_of_edges - 1], longitude_edges[no_of_edges - 1]);
                    }
                    else
                    {
                        triangle_0 = calc_triangle_area(latitude_scalar[from_or_to_index[i]], longitude_scalar[from_or_to_index[i]],
                        latitude_vertices[indices_resorted[l]], longitude_vertices[indices_resorted[l]],
                        latitude_edges[l - 1], longitude_edges[l - 1]);
                    }
                    triangle_1 = calc_triangle_area(latitude_scalar[from_or_to_index[i]], longitude_scalar[from_or_to_index[i]],
                    latitude_vertices[indices_resorted[l]], longitude_vertices[indices_resorted[l]],
                    latitude_edges[l], longitude_edges[l]);
                    vector_of_areas[l] = pow(RADIUS + z_vector[NO_OF_SCALARS_H + i], 2)*(triangle_0 + triangle_1);
                    check_sum += vector_of_areas[l];
                }
                // checking whether the triangles sum up to the cell area
                if (fabs(check_sum/(rescale_for_z_offset_2d*area[from_or_to_index[i]]) - 1) > EPSILON_SECURITY)
                {
                    printf("Problem 30 in TRSK implementation detected. %lf\n", check_sum/(rescale_for_z_offset_2d*area[from_or_to_index[i]]));
                    exit(1);
                }
                // we are summing in the counter-clockwise direction
                for (int l = 0; l < no_of_edges; ++l)
                {
                    if (edge_indices[l] == i)
                    {
                        last_index = l;
                    }
                    if (edge_indices[l] == trsk_indices[10*i + k])
                    {
                        first_index = (l + 1)%no_of_edges;
                    }
                }
                sum_of_weights = double_sum_gen(vector_of_areas, no_of_edges, first_index, last_index);
                // dividing by the cell area
                sum_of_weights = sum_of_weights/(rescale_for_z_offset_2d*area[from_or_to_index[i]]);
                // checking for reliability
                if (sum_of_weights < 0 || sum_of_weights > 1)
                {
                    printf("Problem 34 in TRSK implementation detected.\n");
                    exit(1);
                }
                // Eq. (33) of the TRSK paper
                trsk_weights[10*i + k] = sign_0*(sum_of_weights - 0.5)*sign_1;
                // weighting by geometrical grid prefactors, the minus sign accounts for the fact that our tangential direction is reversed compared to TRSK
                trsk_weights[10*i + k] = -rescale_for_z_offset_1d*normal_distance_dual[trsk_indices[10*i + k]]/normal_distance[NO_OF_SCALARS_H + i]*trsk_weights[10*i + k];
            }
        }
        // modification following Gassmann (2018)
        // First of all, the indices need to be resorted.
        // As usual, the from cell is treated first.
        // It needs to be determined whether the cell at hand is pentagonal or hexagonal.
        no_of_edges = 6;
        if (from_index[i] < NO_OF_PENTAGONS)
        {
            no_of_edges = 5;
        }
        int trsk_indices_pre[10];
        double trsk_weights_pre[10];
        for (int j = 0; j < 10; ++j)
        {
            trsk_indices_pre[j] = trsk_indices[10*i + j];
            trsk_weights_pre[j] = trsk_weights[10*i + j];
        }
        // walk around the from cell starting at the dual vertex on the to side
        int next_vertex_index, next_vertex_index_candidate;
        next_vertex_index = to_index_dual[i];
        // NOTE(review): indices_used is sized with the FROM cell's no_of_edges
        // but is reused below for the TO cell; if the from cell is pentagonal
        // (size 4) and the to cell hexagonal (5 iterations), the writes/reads
        // below appear to go one past the end — verify against the grid
        // generator's guarantees.
        int indices_used[no_of_edges - 1];
        int indices_used_counter = 0;
        for (int j = 0; j < no_of_edges - 1; ++j)
        {
            indices_used[j] = -1;
        }
        int value_written;
        for (int j = 0; j < no_of_edges - 1; ++j)
        {
            value_written = 0;
            for (int k = 0; k < no_of_edges - 1; ++k)
            {
                // pick the not-yet-used edge touching the current vertex
                if ((from_index_dual[trsk_indices_pre[k]] == next_vertex_index || to_index_dual[trsk_indices_pre[k]] == next_vertex_index)
                && 0 == in_bool_calculator(k, indices_used, no_of_edges - 1)
                && value_written == 0)
                {
                    trsk_indices[10*i + j] = trsk_indices_pre[k];
                    trsk_weights[10*i + j] = trsk_weights_pre[k];
                    indices_used[indices_used_counter] = k;
                    indices_used_counter++;
                    value_written = 1;
                }
            }
            // advance to the other endpoint of the chosen edge
            next_vertex_index_candidate = to_index_dual[trsk_indices[10*i + j]];
            if (next_vertex_index_candidate == next_vertex_index)
            {
                next_vertex_index = from_index_dual[trsk_indices[10*i + j]];
            }
            else
            {
                next_vertex_index = next_vertex_index_candidate;
            }
        }
        // checking for reliability
        if (indices_used_counter != no_of_edges - 1)
        {
            printf("Problem 42 in TRSK implementation detected.\n");
            exit(1);
        }
        // Then comes the to cell.
        // It needs to be determined whether the cell at hand is pentagonal or hexagonal.
        no_of_edges = 6;
        if (to_index[i] < NO_OF_PENTAGONS)
        {
            no_of_edges = 5;
        }
        next_vertex_index = from_index_dual[i];
        indices_used_counter = 0;
        for (int j = 0; j < no_of_edges - 1; ++j)
        {
            indices_used[j] = -1;
        }
        for (int j = 0; j < no_of_edges - 1; ++j)
        {
            value_written = 0;
            for (int k = 0; k < no_of_edges - 1; ++k)
            {
                if ((from_index_dual[trsk_indices_pre[5 + k]] == next_vertex_index || to_index_dual[trsk_indices_pre[5 + k]] == next_vertex_index)
                && 0 == in_bool_calculator(k, indices_used, no_of_edges - 1)
                && value_written == 0)
                {
                    trsk_indices[10*i + 5 + j] = trsk_indices_pre[5 + k];
                    trsk_weights[10*i + 5 + j] = trsk_weights_pre[5 + k];
                    indices_used[indices_used_counter] = k;
                    indices_used_counter++;
                    value_written = 1;
                }
            }
            next_vertex_index_candidate = to_index_dual[trsk_indices[10*i + 5 + j]];
            if (next_vertex_index_candidate == next_vertex_index)
            {
                next_vertex_index = from_index_dual[trsk_indices[10*i + 5 + j]];
            }
            else
            {
                next_vertex_index = next_vertex_index_candidate;
            }
        }
        // checking for reliability
        if (indices_used_counter != no_of_edges - 1)
        {
            printf("Problem 43 in TRSK implementation detected.\n");
            exit(1);
        }
        // Now the resorting itself can be executed.
        if (to_index[i] < NO_OF_PENTAGONS)
        {
            trsk_modified_curl_indices[10*i + 0] = trsk_indices[10*i + 8];
        }
        else
        {
            trsk_modified_curl_indices[10*i + 0] = trsk_indices[10*i + 9];
        }
        trsk_modified_curl_indices[10*i + 1] = trsk_indices[10*i + 0];
        if (from_index[i] < NO_OF_PENTAGONS)
        {
            trsk_modified_curl_indices[10*i + 2] = trsk_indices[10*i + 3];
            trsk_modified_curl_indices[10*i + 3] = trsk_indices[10*i + 5];
            trsk_modified_curl_indices[10*i + 4] = 0;
            // slot 4 of a pentagonal from cell must carry zero weight
            if (trsk_weights[10*i + 4] != 0)
            {
                printf("Problem 40 in TRSK implementation detected.\n");
                exit(1);
            }
        }
        else
        {
            trsk_modified_curl_indices[10*i + 2] = trsk_indices[10*i + 2];
            trsk_modified_curl_indices[10*i + 3] = trsk_indices[10*i + 4];
            trsk_modified_curl_indices[10*i + 4] = trsk_indices[10*i + 5];
        }
        if (from_index[i] < NO_OF_PENTAGONS)
        {
            trsk_modified_curl_indices[10*i + 5] = trsk_indices[10*i + 3];
        }
        else
        {
            trsk_modified_curl_indices[10*i + 5] = trsk_indices[10*i + 4];
        }
        trsk_modified_curl_indices[10*i + 6] = trsk_indices[10*i + 5];
        if (to_index[i] < NO_OF_PENTAGONS)
        {
            trsk_modified_curl_indices[10*i + 7] = trsk_indices[10*i + 8];
            trsk_modified_curl_indices[10*i + 8] = trsk_indices[10*i + 0];
            trsk_modified_curl_indices[10*i + 9] = 0;
            // slot 9 of a pentagonal to cell must carry zero weight
            if (trsk_weights[10*i + 9] != 0)
            {
                printf("Problem 41 in TRSK implementation detected.\n");
                exit(1);
            }
        }
        else
        {
            trsk_modified_curl_indices[10*i + 7] = trsk_indices[10*i + 7];
            trsk_modified_curl_indices[10*i + 8] = trsk_indices[10*i + 9];
            trsk_modified_curl_indices[10*i + 9] = trsk_indices[10*i + 0];
        }
        // a reconstruction index may appear at most once with non-zero weight
        for (int j = 0; j < 10; ++j)
        {
            for (int k = j + 1; k < 10; ++k)
            {
                if (trsk_indices[10*i + j] == trsk_indices[10*i + k]
                && (trsk_weights[10*i + j] != 0 && trsk_weights[10*i + k] != 0))
                {
                    printf("Problem 29 in TRSK implementation detected.\n");
                    exit(1);
                }
            }
        }
        free(from_or_to_index);
    }
    int second_index;
    // This checks Eq. (39) of the first TRSK paper (Thuburn et al., 2009).
    // (antisymmetry of the weights, which guarantees energy conservation)
    double value_0, value_1;
    #pragma omp parallel for private(first_index, value_0, second_index, value_1, check_sum)
    for (int i = 0; i < NO_OF_VECTORS_H; ++i)
    {
        for (int j = 0; j < 10; ++j)
        {
            first_index = trsk_indices[10*i + j];
            if (first_index != -1)
            {
                value_0 = normal_distance[NO_OF_SCALARS_H + i]/(rescale_for_z_offset_1d*normal_distance_dual[first_index])*trsk_weights[10*i + j];
                // find the reciprocal slot: edge i in the stencil of first_index
                second_index = -1;
                for (int k = 0; k < 10; ++k)
                {
                    if (trsk_indices[10*first_index + k] == i)
                    {
                        second_index = 10*first_index + k;
                    }
                }
                if (second_index == -1)
                {
                    printf("Problem 38 in TRSK implementation detected.\n");
                    exit(1);
                }
                value_1 = normal_distance[NO_OF_SCALARS_H + first_index]/(rescale_for_z_offset_1d*normal_distance_dual[i])*trsk_weights[second_index];
                // the two rescaled weights must cancel exactly
                check_sum = value_0 + value_1;
                if (fabs(check_sum) > EPSILON_SECURITY)
                {
                    printf("Problem 39 in TRSK implementation detected.%lf\n", check_sum);
                    exit(1);
                }
            }
        }
    }
    // replace the -1 sentinels with 0 so the indices are always valid array offsets
    // (the corresponding weights are zero, so the contribution vanishes)
    #pragma omp parallel for
    for (int i = 0; i < 10*NO_OF_VECTORS_H; ++i)
    {
        if (trsk_indices[i] == -1)
        {
            trsk_indices[i] = 0;
        }
    }
    return 0;
}
simd_loop_private.c
/* Example using private variables in a simd construct.

   func1() and func2() return intermediate results that must be stored in
   private variables to avoid a data race: with private(t1, t2) each SIMD
   lane gets its own instance of t1 and t2. */
void simd_loop_private(double *a, double *b, double *c, int n)
{
   int i;
   double t1, t2;

#pragma omp simd private(t1, t2)
   for (i = 0; i < n; i++) {
      t1 = func1(b[i], c[i]);
      t2 = func2(b[i], c[i]);
      /* Fix: combine the per-lane intermediate results.  Previously t1 and
         t2 were computed but never used (a[i] was b[i] + c[i]), defeating
         the point of the private clause the comment above describes. */
      a[i] = t1 * t2;
   }
}
GB_unop__isnan_bool_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isnan_bool_fc32) // op(A') function: GB (_unop_tran__isnan_bool_fc32) // C type: bool // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = GB_cisnanf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cisnanf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = GB_cisnanf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isnan_bool_fc32) ( bool *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma 
omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisnanf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisnanf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isnan_bool_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
segmentation_omp.c
#include <stdlib.h> #include <float.h> #include <math.h> #include <omp.h> #include "image_io.h" #include "segmentation.h" void init_centers(byte_t *data, double *centers, int n_px, int n_ch, int n_clus); void assign_pixels(byte_t *data, double *centers, int *labels, double *dists, int *changes, int n_px, int n_ch, int n_clus); void update_centers(byte_t *data, double *centers, int *labels, double *dists, int n_px, int n_ch, int n_clus); void update_data(byte_t *data, double *centers, int *labels, int n_px, int n_ch); void compute_sse(double *sse, double *dists, int n_px); void kmeans_segm_omp(byte_t *data, int width, int height, int n_ch, int n_clus, int *n_iters, double *sse, int n_threads) { int n_px; int iter, max_iters; int changes; int *labels; double *centers; double *dists; max_iters = *n_iters; n_px = width * height; labels = malloc(n_px * sizeof(int)); centers = malloc(n_clus * n_ch * sizeof(double)); dists = malloc(n_px * sizeof(double)); omp_set_num_threads(n_threads); init_centers(data, centers, n_px, n_ch, n_clus); for (iter = 0; iter < max_iters; iter++) { assign_pixels(data, centers, labels, dists, &changes, n_px, n_ch, n_clus); if (!changes) { break; } update_centers(data, centers, labels, dists, n_px, n_ch, n_clus); } update_data(data, centers, labels, n_px, n_ch); compute_sse(sse, dists, n_px); *n_iters = iter; free(centers); free(labels); free(dists); } void init_centers(byte_t *data, double *centers, int n_px, int n_ch, int n_clus) { int k, ch, rnd; for (k = 0; k < n_clus; k++) { rnd = rand() % n_px; for (ch = 0; ch < n_ch; ch++) { centers[k * n_ch + ch] = data[rnd * n_ch + ch]; } } } void assign_pixels(byte_t *data, double *centers, int *labels, double *dists, int *changes, int n_px, int n_ch, int n_clus) { int px, ch, k; int min_k, tmp_changes = 0; double dist, min_dist, tmp; #pragma omp parallel for schedule(static) private(px, ch, k, min_k, dist, min_dist, tmp) for (px = 0; px < n_px; px++) { min_dist = DBL_MAX; for (k = 0; k < n_clus; k++) 
{ dist = 0; for (ch = 0; ch < n_ch; ch++) { tmp = (double)(data[px * n_ch + ch] - centers[k * n_ch + ch]); dist += tmp * tmp; } if (dist < min_dist) { min_dist = dist; min_k = k; } } dists[px] = min_dist; if (labels[px] != min_k) { labels[px] = min_k; tmp_changes = 1; } } *changes = tmp_changes; } void update_centers(byte_t *data, double *centers, int *labels, double *dists, int n_px, int n_ch, int n_clus) { int px, ch, k; int *counts; int min_k, far_px; double max_dist; counts = malloc(n_clus * sizeof(int)); // Resetting centers and initializing clusters counters for (k = 0; k < n_clus; k++) { for (ch = 0; ch < n_ch; ch++) { centers[k * n_ch + ch] = 0; } counts[k] = 0; } // Computing partial sums of the centers and updating clusters counters #pragma omp parallel for private(px, ch, min_k) reduction(+:centers[:n_clus * n_ch],counts[:n_clus]) for (px = 0; px < n_px; px++) { min_k = labels[px]; for (ch = 0; ch < n_ch; ch++) { centers[min_k * n_ch + ch] += data[px * n_ch + ch]; } counts[min_k]++; } // Dividing to obtain the centers mean for (k = 0; k < n_clus; k++) { if (counts[k]) { for (ch = 0; ch < n_ch; ch++) { centers[k * n_ch + ch] /= counts[k]; } } else { // If the cluster is empty we find the farthest pixel from its cluster center max_dist = 0; for (px = 0; px < n_px; px++) { if (dists[px] > max_dist) { max_dist = dists[px]; far_px = px; } } for (ch = 0; ch < n_ch; ch++) { centers[k * n_ch + ch] = data[far_px * n_ch + ch]; } dists[far_px] = 0; } } free(counts); } void update_data(byte_t *data, double *centers, int *labels, int n_px, int n_ch) { int px, ch, min_k; #pragma omp parallel for schedule(static) private(px, ch, min_k) for (px = 0; px < n_px; px++) { min_k = labels[px]; for (ch = 0; ch < n_ch; ch++) { data[px * n_ch + ch] = (byte_t)round(centers[min_k * n_ch + ch]); } } } void compute_sse(double *sse, double *dists, int n_px) { int px; double res = 0; #pragma omp parallel for private(px) reduction(+:res) for (px = 0; px < n_px; px++) { res += 
dists[px]; } *sse = res; }
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at the mozilla.org home page #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H namespace Eigen { namespace internal { template<typename _LhsScalar, typename _RhsScalar> class level3_blocking; /* Specialization for a row-major destination matrix => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int ResInnerStride> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride> { typedef gebp_traits<RhsScalar,LhsScalar> Traits; typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resIncr, Index resStride, ResScalar alpha, level3_blocking<RhsScalar,LhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { // transpose the product such that the result is column major general_matrix_matrix_product<Index, RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs, LhsScalar, LhsStorageOrder==RowMajor ? 
ColMajor : RowMajor, ConjugateLhs, ColMajor,ResInnerStride> ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info); } }; /* Specialization for a col-major destination matrix * => Blocking algorithm following Goto's paper */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int ResInnerStride> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride> { typedef gebp_traits<LhsScalar,RhsScalar> Traits; typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resIncr, Index resStride, ResScalar alpha, level3_blocking<LhsScalar,RhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper; typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper; typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper; LhsMapper lhs(_lhs, lhsStride); RhsMapper rhs(_rhs, rhsStride); ResMapper res(_res, resStride, resIncr); Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs; gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs; gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp; #ifdef EIGEN_HAS_OPENMP if(info) { // this is the parallel version! 
int tid = omp_get_thread_num(); int threads = omp_get_num_threads(); LhsScalar* blockA = blocking.blockA(); eigen_internal_assert(blockA!=0); std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k<depth; k+=kc) { const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing B'. pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc); // Pack A_k to A' in a parallel fashion: // each thread packs the sub block A_k,i to A'_i where i is the thread id. // However, before copying to A'_i, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += threads; pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length); // Notify the other threads that the part A'_i is ready to go. info[tid].sync = k; // Computes C_i += A' * B' per A'_i for(int shift=0; shift<threads; ++shift) { int i = (tid+shift)%threads; // At this point we have to make sure that A'_i has been updated by the thread i, // we use testAndSetOrdered to mimic a volatile access. // However, no need to wait for the B' part which has been updated by the current thread! 
if (shift>0) { while(info[i].sync!=k) { } } gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); } // Then keep going as usual with the remaining B' for(Index j=nc; j<cols; j+=nc) { const Index actual_nc = (std::min)(j+nc,cols)-j; // pack B_k,j to B' pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc); // C_j += A' * B' gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha); } // Release all the sub blocks A'_i of A' for the current thread, // i.e., we simply decrement the number of users by 1 for(Index i=0; i<threads; ++i) #pragma omp atomic info[i].users -= 1; } } else #endif // EIGEN_HAS_OPENMP { EIGEN_UNUSED_VARIABLE(info); // this is the sequential version! std::size_t sizeA = kc*mc; std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols; // For each horizontal panel of the rhs, and corresponding panel of the lhs... for(Index i2=0; i2<rows; i2+=mc) { const Index actual_mc = (std::min)(i2+mc,rows)-i2; for(Index k2=0; k2<depth; k2+=kc) { const Index actual_kc = (std::min)(k2+kc,depth)-k2; // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) // Note that this panel will be read as many times as the number of blocks in the rhs's // horizontal panel which is, in practice, a very low number. pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc); // For each kc x nc block of the rhs's horizontal panel... 
for(Index j2=0; j2<cols; j2+=nc) { const Index actual_nc = (std::min)(j2+nc,cols)-j2; // We pack the rhs's block into a sequential chunk of memory (L2 caching) // Note that this block will be read a very high number of times, which is equal to the number of // micro horizontal panel of the large rhs's panel (e.g., rows/12 times). if((!pack_rhs_once) || i2==0) pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc); // Everything is packed, we can now call the panel * block kernel: gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha); } } } } } }; /********************************************************************************* * Specialization of generic_product_impl for "large" GEMM, i.e., * implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType> struct gemm_functor { gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession(Index num_threads) const { m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads); m_blocking.allocateA(); } void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const { if(cols==-1) cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), &m_lhs.coeffRef(row,0), m_lhs.outerStride(), &m_rhs.coeffRef(0,col), m_rhs.outerStride(), (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } typedef typename Gemm::Traits Traits; protected: const Lhs& m_lhs; const Rhs& m_rhs; Dest& m_dest; Scalar m_actualAlpha; BlockingType& m_blocking; }; template<int StorageOrder, typename LhsScalar, typename 
RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1, bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space; template<typename _LhsScalar, typename _RhsScalar> class level3_blocking { typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; protected: LhsScalar* m_blockA; RhsScalar* m_blockB; Index m_mc; Index m_nc; Index m_kc; public: level3_blocking() : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {} inline Index mc() const { return m_mc; } inline Index nc() const { return m_nc; } inline Index kc() const { return m_kc; } inline LhsScalar* blockA() { return m_blockA; } inline RhsScalar* blockB() { return m_blockB; } }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor, ActualRows = Transpose ? MaxCols : MaxRows, ActualCols = Transpose ? 
MaxRows : MaxCols }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth }; #if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA]; EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB]; #else EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; #endif public: gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/) { this->m_mc = ActualRows; this->m_nc = ActualCols; this->m_kc = MaxDepth; #if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES this->m_blockA = m_staticA; this->m_blockB = m_staticB; #else this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); #endif } void initParallel(Index, Index, Index, Index) {} inline void allocateA() {} inline void allocateB() {} inline void allocateAll() {} }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename 
conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; Index m_sizeA; Index m_sizeB; public: gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; this->m_kc = depth; if(l3_blocking) { computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads); } else // no l3 blocking { Index n = this->m_nc; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads); } m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void initParallel(Index rows, Index cols, Index depth, Index num_threads) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; this->m_kc = depth; eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0); Index m = this->m_mc; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads); m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void allocateA() { if(this->m_blockA==0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); } void allocateB() { if(this->m_blockB==0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); } void allocateAll() { allocateA(); allocateB(); } ~gemm_blocking_space() { aligned_delete(this->m_blockA, m_sizeA); aligned_delete(this->m_blockB, m_sizeB); } }; } // end namespace internal namespace internal { template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef internal::blas_traits<Lhs> LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; typedef typename 
internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned; typedef internal::blas_traits<Rhs> RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned; enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) }; typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct; template<typename Dst> static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>()); else { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } } template<typename Dst> static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>()); else scaleAndAddTo(dst,lhs, rhs, Scalar(1)); } template<typename Dst> static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>()); else scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } template<typename Dest> static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha) { eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols()); if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0) return; typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs) * RhsBlasTraits::extractScalarFactor(a_rhs); typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? 
RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor, Dest::InnerStrideAtCompileTime>, ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)> (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
Example_acquire_release.3.c
/*
 * @@name: acquire_release.3.c
 * @@type: C
 * @@compilable: yes
 * @@linkable: yes
 * @@expect: success
 * @@version: omp_5.0
 */

/* Release/acquire synchronization built from flushes plus atomics:
   thread 0 stores the payload x and then sets the flag y; thread 1
   spins on y and, after its flush, must observe x == 10. */
#include <stdio.h>
#include <omp.h>

int main()
{
   int x = 0, y = 0;

   #pragma omp parallel num_threads(2)
   {
      int tid = omp_get_thread_num();

      if (tid == 0)
      {
         /* Writer: publish the payload, then release it with a flush
            before the atomic store to the flag. */
         x = 10;
         #pragma omp flush // or with acq_rel or release clause
         #pragma omp atomic write // or with relaxed clause
         y = 1;
      }
      else
      {
         /* Reader: spin until the flag becomes visible, then acquire
            with a flush before reading the payload. */
         int seen = 0;
         do {
            #pragma omp atomic read // or with relaxed clause
            seen = y;
         } while (seen == 0);

         #pragma omp flush // or with acq_rel or acquire clause
         printf("x = %d\n", x); // always "x = 10"
      }
   }
   return 0;
}
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. 
% % The format of the CompositeImage method is: % % MagickBooleanType CompositeImage(Image *image, % const Image *source_image,const CompositeOperator compose, % const MagickBooleanType clip_to_self,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o source_image: the source image. % % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o clip_to_self: set to MagickTrue to limit composition to area composed. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o exception: return any errors or warnings in this structure. % */ /* Composition based on the SVG specification: A Composition is defined by... Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc) Y = 1 for source preserved Z = 1 for canvas preserved Conversion to transparency (then optimized) Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) Where... Sca = Sc*Sa normalized Source color divided by Source alpha Dca = Dc*Da normalized Dest color divided by Dest alpha Dc' = Dca'/Da' the desired color value for this channel. Da' in in the follow formula as 'gamma' The resulting alpla value. Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in the following optimizations... 
gamma = Sa+Da-Sa*Da; gamma = 1 - QuantumScale*alpha * QuantumScale*beta; opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma The above SVG definitions also define that Mathematical Composition methods should use a 'Over' blending mode for Alpha Channel. It however was not applied for composition modes of 'Plus', 'Minus', the modulus versions of 'Add' and 'Subtract'. Mathematical operator changes to be applied from IM v6.7... 1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed 'ModulusAdd' and 'ModulusSubtract' for clarity. 2) All mathematical compositions work as per the SVG specification with regard to blending. This now includes 'ModulusAdd' and 'ModulusSubtract'. 3) When the special channel flag 'sync' (syncronize channel updates) is turned off (enabled by default) then mathematical compositions are only performed on the channels specified, and are applied independantally of each other. In other words the mathematics is performed as 'pure' mathematical operations, rather than as image operations. */ static void HCLComposite(const MagickRealType hue,const MagickRealType chroma, const MagickRealType luma,MagickRealType *red,MagickRealType *green, MagickRealType *blue) { MagickRealType b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma, MagickRealType *luma) { MagickRealType b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (MagickRealType *) NULL); assert(chroma != (MagickRealType *) NULL); assert(luma != (MagickRealType *) NULL); r=red; g=green; b=blue; max=MagickMax(r,MagickMax(g,b)); c=max-(MagickRealType) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == max) h=fmod((g-b)/c+6.0,6.0); else if (green == max) h=((b-r)/c)+2.0; else if (blue == max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static MagickBooleanType CompositeOverImage(Image *image, const Image *source_image,const MagickBooleanType clip_to_self, const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *image_view, *source_view; const char *value; MagickBooleanType clamp, status; MagickOffsetType progress; ssize_t y; /* Composite image. 
*/ status=MagickTrue; progress=0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. */ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); alpha=Sa+Da-Sa*Da; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ pixel=QuantumRange*alpha; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Sc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; gamma=PerceptibleReciprocal(alpha); pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); q[i]=clamp != MagickFalse ? 
ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType CompositeImage(Image *image, const Image *composite,const CompositeOperator compose, const MagickBooleanType clip_to_self,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *source_view, *image_view; const char *value; GeometryInfo geometry_info; Image *canvas_image, *source_image; MagickBooleanType clamp, status; MagickOffsetType progress; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); (void) SetImageColorspace(source_image,image->colorspace,exception); if 
((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if (traits == 
UndefinedPixelTrait) continue; if (source_traits != UndefinedPixelTrait) SetPixelChannel(image,channel,p[i],q); else if (channel == AlphaPixelChannel) SetPixelChannel(image,channel,OpaqueAlpha,q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); 
continue; } SetPixelAlpha(image,clamp != MagickFalse ? ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; MagickRealType angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. 
*/ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* do the variable blurring of each pixel in image */ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs((double) angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { (void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1, blur.x2,blur.y1, blur.y2); (void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale* GetPixelRed(p),QuantumScale*GetPixelGreen(p)); #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if 
(sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); status=InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. 
Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, DcaDa, Sa, SaSca, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case LightenCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case ModulusAddCompositeOp: case ModulusSubtractCompositeOp: case MultiplyCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ScreenCompositeOp: case SoftLightCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } default: { alpha=1.0; break; } } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait 
traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } case MultiplyCompositeOp: { pixel=QuantumRange*Sa*Da; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } if (source_traits == UndefinedPixelTrait) continue; /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Dc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; SaSca=Sa*PerceptibleReciprocal(Sca); DcaDa=Dca*PerceptibleReciprocal(Da); switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case BlurCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } 
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)* SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) (QuantumRange- GetPixelBlack(source_image,p)); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { pixel=Sc+Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case ModulusSubtractCompositeOp: { pixel=Sc-Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. 
*/ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sca); break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa* (4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)- DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case StereoCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); 
break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)* PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0* (Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
TextureImage() repeatedly tiles the texture image across and down the image
%  canvas.
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o texture_image: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Work on a private clone so the caller's texture is never modified; the
    tile virtual-pixel method makes reads past the clone's edge wrap around,
    which the optimized path below relies on.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    If the compose operator actually blends (anything other than Copy, or an
    Over that must honor alpha), fall back to calling CompositeImage() once
    per texture-sized block.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* tile_offset shifts the texture's phase within each block */
          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): straight per-channel
    pixel copies, parallelized one image row at a time.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one (phase-shifted, vertically wrapped) texture row and the
      destination row; the texture row is re-walked from its start for each
      horizontal tile below.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      /* the last tile on the row may be clipped by the image edge */
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /* copy only channels that exist (are defined) in both images */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
applu.h
#ifdef _OPENMP
#include <omp.h>
#endif
#include "RCCE.h"
#include "npbparams.h"
#include "applu_protos.h"

//c---------------------------------------------------------------------
//c npbparams.h defines parameters that depend on the class and
//c number of nodes
//c---------------------------------------------------------------------

// NOTE(review): this header DEFINES its globals (no `extern`), so it must be
// included by exactly one translation unit — confirm against the build
// before including it anywhere else.

// Default run controls: print interval, SSOR relaxation factor, and the
// five per-equation residual tolerances.
int ipr_default = 1;
double omega_default = 1.2;
double tolrsd1_def=1.0e-08, tolrsd2_def=1.0e-08, tolrsd3_def=1.0e-08,
       tolrsd4_def=1.0e-08, tolrsd5_def=1.0e-08;

// Fixed model coefficients c1..c5 (values as in the NPB LU reference code).
double c1 = 1.4, c2 = 0.40, c3 = 0.1, c4 = 1.0, c5 = 1.4;

//c---------------------------------------------------------------------
//c grid
//c---------------------------------------------------------------------
int nx, ny, nz;       // grid extents (per process)
int nx0, ny0, nz0;    // global problem dimensions
int ipt, ist, iend;   // i-direction offset and loop bounds
int jpt, jst, jend;   // j-direction offset and loop bounds
int ii1, ii2;
int ji1, ji2;
int ki1, ki2;
double dxi, deta, dzeta;   // grid spacing parameters
double tx1, tx2, tx3;
double ty1, ty2, ty3;
double tz1, tz2, tz3;

//c---------------------------------------------------------------------
//c dissipation
//c---------------------------------------------------------------------
double dx1, dx2, dx3, dx4, dx5;
double dy1, dy2, dy3, dy4, dy5;
double dz1, dz2, dz3, dz4, dz5;
double dssp;

//c---------------------------------------------------------------------
//c field variables and residuals
//c---------------------------------------------------------------------
// Flattened 1-D arrays holding 5 values per grid point (the 5* factor);
// u/rsd/frct carry a 2-cell halo in i and j, flux a 1-cell halo.
double u[5*(isiz1+4)*(isiz2+4)*isiz3],
       rsd[5*(isiz1+4)*(isiz2+4)*isiz3],
       frct[5*(isiz1+4)*(isiz2+4)*isiz3],
       flux[5*(isiz1+2)*(isiz2+2)*isiz3];

//c---------------------------------------------------------------------
//c output control parameters
//c---------------------------------------------------------------------
int ipr, inorm;
//#pragma omp threadprivate(ipr, inorm)

//c---------------------------------------------------------------------
//c newton-raphson iteration control parameters
//c---------------------------------------------------------------------
int itmax, invert;
double dt, omega, tolrsd[5], rsdnm[5], errnm[5], frc, ttotal;

// 5x5 block matrices, one block per (i,j) grid point (see the 5*5*isiz1*isiz2
// sizing) — lower/diagonal/upper factors used by the sweep routines.
double a[5*5*isiz1*isiz2],
       b[5*5*isiz1*isiz2],
       c[5*5*isiz1*isiz2],
       d[5*5*isiz1*isiz2];

//c---------------------------------------------------------------------
//c coefficients of the exact solution
//c---------------------------------------------------------------------
double ce[5*13];
//#pragma omp threadprivate(ce)

//c---------------------------------------------------------------------
//c multi-processor common blocks
//c---------------------------------------------------------------------
int id, ndim, num, xdim, ydim, row, col;
int north,south,east,west;   // presumably neighbor ids in the process grid — verify against the exchange routines
int npmax=isiz01+isiz02;
// double buf[5*2*isiz2*isiz3], buf1[5*2*isiz2*isiz3];
double maxtime;

//c---------------------------------------------------------------------
//c coordination flags
//c---------------------------------------------------------------------
RCCE_FLAG flagsent[4], flagready[4];

double *buf1_exch_1;

#ifdef _OPENMP
// Under OpenMP, each thread keeps a private copy of the entire solver state
// (grid, fields, factors, communication bookkeeping) — NOTE(review): this
// looks like one emulated RCCE "core" per thread; confirm with the RCCE port.
#pragma omp threadprivate (nx, ny, nz, nx0, ny0, nz0, \
   ipt, ist, iend, jpt, jst, jend, \
   ii1, ii2, ji1, ji2, ki1, ki2, \
   dxi, deta, dzeta, \
   tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3)
#pragma omp threadprivate (dx1, dx2, dx3, dx4, dx5, \
   dy1, dy2, dy3, dy4, dy5, \
   dz1, dz2, dz3, dz4, dz5, \
   dssp)
#pragma omp threadprivate(u, rsd, frct, flux)
#pragma omp threadprivate(itmax, invert, \
   dt, omega, tolrsd, rsdnm, errnm, frc, ttotal, \
   a, b, c, d)
#pragma omp threadprivate (id, ndim, num, xdim, ydim, row, col, \
   north,south,east,west, flagsent, flagready, \
   buf1_exch_1, npmax, maxtime)
#endif
//c---------------------------------------------------------------------
//c end of include file
//c---------------------------------------------------------------------
pca_kmeans.c
#include "pca_kmeans.h"
#include "kmeans_utils.h"

#include "../../utils/matrix/csr_matrix/csr_to_vector_list.h"
#include "../../utils/matrix/vector_list/vector_list_math.h"
#include "../../utils/matrix/csr_matrix/csr_math.h"
#include "../../utils/vector/common/common_vector_math.h"
#include "../../utils/vector/sparse/sparse_vector_math.h"
#include "../../utils/fcl_logging.h"

#include <math.h>
#include <unistd.h>
#include <float.h>

/*
 * k-means accelerated with PCA-projection lower bounds.
 *
 * Each full Euclidean distance is attempted to be skipped via three filters,
 * cheapest first: (1) the cluster did not move and the sample is eligible,
 * (2) a Cauchy-Schwarz style length bound, (3) a distance in the low-
 * dimensional PCA space (a lower bound on the true distance). Only when all
 * three fail is the exact distance computed. If prms->ext_vects is NULL all
 * optimizations are disabled and plain Lloyd iterations are performed.
 *
 * samples: CSR matrix, one row per sample (borrowed; not freed here).
 * prms:    algorithm parameters; prms->stop is polled so the run can be
 *          interrupted by signals.
 * Returns a newly allocated kmeans_result (ownership passes to the caller).
 */
struct kmeans_result* pca_kmeans(struct csr_matrix* samples, struct kmeans_params *prms) {
    uint32_t i;
    uint64_t j;
    struct sparse_vector* pca_projection_samples;   /* projection matrix of samples */
    struct sparse_vector* pca_projection_clusters;  /* projection matrix of clusters */
    struct kmeans_result* res;
    uint32_t disable_optimizations;
    VALUE_TYPE* vector_lengths_pca_samples;
    VALUE_TYPE* vector_lengths_pca_clusters;

    /* samples which are eligible for the cluster-no-change optimization */
    uint32_t *eligible_for_cluster_no_change_optimization;
    struct general_kmeans_context ctx;

    pca_projection_clusters = NULL;
    pca_projection_samples = NULL;

    /* BUGFIX: both length arrays must start out NULL unconditionally. They
     * were previously left indeterminate when disable_optimizations was set,
     * and the per-iteration free() below then freed a wild pointer (UB). */
    vector_lengths_pca_samples = NULL;
    vector_lengths_pca_clusters = NULL;

    initialize_general_context(prms, &ctx, samples);

    disable_optimizations = prms->ext_vects == NULL;

    if (!disable_optimizations) {
        if (prms->kmeans_algorithm_id == ALGORITHM_PCA_KMEANS) {
            /* create pca projections for the samples */
            pca_projection_samples = matrix_dot(samples, prms->ext_vects);
            calculate_vector_list_lengths(pca_projection_samples,
                                          samples->sample_count,
                                          &vector_lengths_pca_samples);
        }

        /* create pca projections for the clusters */
        pca_projection_clusters = sparse_vectors_matrix_dot(ctx.cluster_vectors,
                                                            ctx.no_clusters,
                                                            prms->ext_vects);
    }

    eligible_for_cluster_no_change_optimization =
        (uint32_t*) calloc(ctx.samples->sample_count, sizeof(uint32_t));

    for (i = 0; i < prms->iteration_limit && !ctx.converged && !prms->stop; i++) {
        /* track how many projection calculations were made / saved */
        uint64_t saved_calculations_pca, saved_calculations_prev_cluster;
        uint64_t done_pca_calcs, saved_calculations_cauchy;

        /* reset all calculation counters */
        done_pca_calcs = 0;
        saved_calculations_cauchy = 0;
        saved_calculations_prev_cluster = 0;
        saved_calculations_pca = 0;

        /* initialize data needed for the iteration */
        pre_process_iteration(&ctx);

        if (!disable_optimizations) {
            /* BUGFIX: refresh the projected cluster lengths only when the
             * projections exist; this used to run unconditionally, freeing an
             * uninitialized pointer and projecting a NULL list when
             * optimizations were disabled. */
            free(vector_lengths_pca_clusters);
            calculate_vector_list_lengths(pca_projection_clusters,
                                          ctx.no_clusters,
                                          &vector_lengths_pca_clusters);
        }

        #pragma omp parallel for schedule(dynamic, 1000)
        for (j = 0; j < ctx.samples->sample_count; j++) {
            /* iterate over all samples */
            VALUE_TYPE dist;
            uint64_t cluster_id, sample_id;
            struct sparse_vector pca_projection;   /* per-sample on-demand projection */
            pca_projection.nnz = 0;
            pca_projection.keys = NULL;
            pca_projection.values = NULL;

            /* only one thread polls for signals to keep overhead low */
            if (omp_get_thread_num() == 0) check_signals(&(prms->stop));

            if (!prms->stop) {
                sample_id = j;

                for (cluster_id = 0; cluster_id < ctx.no_clusters; cluster_id++) {
                    /* iterate over all cluster centers */

                    /* after the first iteration empty clusters can be skipped */
                    if (i != 0 && ctx.cluster_counts[cluster_id] == 0) continue;

                    if (!disable_optimizations) {
                        /* distance to the previous cluster is already known */
                        if (cluster_id == ctx.previous_cluster_assignments[sample_id]) continue;

                        /* clusters which did not move in the last iteration
                         * cannot beat the current best for eligible samples */
                        if (eligible_for_cluster_no_change_optimization[sample_id]
                            && ctx.clusters_not_changed[cluster_id]) {
                            saved_calculations_prev_cluster += 1;
                            goto end;
                        }

                        /* cheap Cauchy-style lower bound from vector lengths */
                        dist = lower_bound_euclid(ctx.vector_lengths_clusters[cluster_id],
                                                  ctx.vector_lengths_samples[sample_id]);
                        if (dist >= ctx.cluster_distances[sample_id]) {
                            /* bound already exceeds the current best distance */
                            saved_calculations_cauchy += 1;
                            goto end;
                        }

                        if (prms->kmeans_algorithm_id == ALGORITHM_PCA_KMEANS) {
                            /* PCA bound using the precalculated sample projection */
                            dist = euclid_vector(pca_projection_samples[sample_id].keys
                                               , pca_projection_samples[sample_id].values
                                               , pca_projection_samples[sample_id].nnz
                                               , pca_projection_clusters[cluster_id].keys
                                               , pca_projection_clusters[cluster_id].values
                                               , pca_projection_clusters[cluster_id].nnz
                                               , vector_lengths_pca_samples[sample_id]
                                               , vector_lengths_pca_clusters[cluster_id]);
                        } else {
                            /* PCA bound with the sample projected on demand.
                             * NOTE(review): this branch reads
                             * pca_projection_samples[], which is only created
                             * for ALGORITHM_PCA_KMEANS — confirm which
                             * algorithm ids can actually reach this path. */
                            if (pca_projection.keys == NULL) {
                                vector_matrix_dot(pca_projection_samples[sample_id].keys,
                                                  pca_projection_samples[sample_id].values,
                                                  pca_projection_samples[sample_id].nnz,
                                                  prms->ext_vects,
                                                  &pca_projection);
                            }

                            dist = euclid_vector(pca_projection.keys,
                                                 pca_projection.values,
                                                 pca_projection.nnz
                                               , pca_projection_clusters[cluster_id].keys
                                               , pca_projection_clusters[cluster_id].values
                                               , pca_projection_clusters[cluster_id].nnz
                                               , ctx.vector_lengths_samples[sample_id]
                                               , ctx.vector_lengths_clusters[cluster_id]);
                        }

                        done_pca_calcs += 1;
                        if (dist >= ctx.cluster_distances[sample_id]
                            && fabs(dist - ctx.cluster_distances[sample_id]) >= 1e-6) {
                            /* approximated distance is larger than current
                             * best distance; skip the full calculation */
                            saved_calculations_pca += 1;
                            goto end;
                        }
                    }

                    /* all filters failed: compute the exact distance */
                    dist = euclid_vector_list(ctx.samples, sample_id,
                                              ctx.cluster_vectors, cluster_id
                                            , ctx.vector_lengths_samples,
                                              ctx.vector_lengths_clusters);

                    ctx.done_calculations += 1;

                    if (dist < ctx.cluster_distances[sample_id]) {
                        /* replace current best distance with new distance */
                        ctx.cluster_distances[sample_id] = dist;
                        ctx.cluster_assignments[sample_id] = cluster_id;
                    }

                    end:;
                }
            }

            if (!disable_optimizations) {
                free_null(pca_projection.keys);
                free_null(pca_projection.values);
            }
        }

        post_process_iteration(&ctx, prms);

        /* shift clusters to new position */
        calculate_shifted_clusters(&ctx);
        switch_to_shifted_clusters(&ctx);

        if (!disable_optimizations) {
            /* update only projections for clusters that shifted */
            update_dot_products(ctx.cluster_vectors, ctx.no_clusters,
                                prms->ext_vects, ctx.clusters_not_changed,
                                pca_projection_clusters);

            d_add_ilist(&(prms->tr), "iteration_pca_calcs", done_pca_calcs);
            d_add_ilist(&(prms->tr), "iteration_pca_calcs_success",
                        saved_calculations_pca + saved_calculations_cauchy);

            #pragma omp parallel for
            for (j = 0; j < ctx.samples->sample_count; j++) {
                /* iterate over all samples */
                VALUE_TYPE previous_distance;
                previous_distance = ctx.cluster_distances[j];

                /* if the assigned cluster moved, refresh the distance */
                if (ctx.clusters_not_changed[ctx.cluster_assignments[j]] == 0) {
                    ctx.cluster_distances[j] = euclid_vector_list(ctx.samples, j
                                             , ctx.cluster_vectors,
                                               ctx.cluster_assignments[j]
                                             , ctx.vector_lengths_samples
                                             , ctx.vector_lengths_clusters);
                    /*#pragma omp critical*/
                    ctx.done_calculations += 1;
                    ctx.total_no_calcs += 1;
                }

                /* if the cluster moved towards this sample, the sample may
                 * skip unmoved clusters in the next iteration */
                if (ctx.cluster_distances[j] <= previous_distance) {
                    eligible_for_cluster_no_change_optimization[j] = 1;
                } else {
                    eligible_for_cluster_no_change_optimization[j] = 0;
                }
            }
        } else {
            /* naive k-means remembers nothing from the previous iteration */
            for (j = 0; j < ctx.samples->sample_count; j++) {
                ctx.cluster_distances[j] = DBL_MAX;
            }
        }

        print_iteration_summary(&ctx, prms, i);

        /* print projection statistics */
        if (prms->verbose) LOG_INFO("PCA statistics c:%" PRINTF_INT64_MODIFIER "u/b:%"
                PRINTF_INT64_MODIFIER "u/db:%" PRINTF_INT64_MODIFIER
                "u/pc:%" PRINTF_INT64_MODIFIER "u"
                , saved_calculations_cauchy
                , saved_calculations_pca
                , done_pca_calcs
                , saved_calculations_prev_cluster);
    }

    if (prms->verbose) LOG_INFO("total total_no_calcs = %" PRINTF_INT64_MODIFIER "u",
                                ctx.total_no_calcs);

    res = create_kmeans_result(prms, &ctx);

    /* cleanup all */
    if (!disable_optimizations) {
        if (prms->kmeans_algorithm_id == ALGORITHM_PCA_KMEANS) {
            free_vector_list(pca_projection_samples, samples->sample_count);
            free(vector_lengths_pca_samples);
            free(pca_projection_samples);
        }
        free_vector_list(pca_projection_clusters, ctx.no_clusters);
        free(pca_projection_clusters);
        free(vector_lengths_pca_clusters);
    }

    free_general_context(&ctx, prms);
    free_null(eligible_for_cluster_no_change_optimization);
    return res;
}
subteam.c
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */

#define NUMELEMENT 10000000

/* Shared work array; every thread of the team writes disjoint chunks of it
   through the orphaned worksharing loop in init(). */
static double a[NUMELEMENT];

/* init: intended to be invoked from inside a parallel region.  The orphaned
   "omp for" splits the initialization across the enclosing team; the "omp
   single" is then executed by exactly one thread of that team. */
static void init(void)
{
  int i=0,j;

  i=i+5;   /* dead store: i is reassigned by the worksharing loop below */
  /*assume onthread 1,3 */
#pragma omp for
  for (i=0;i<NUMELEMENT;i++)
  {
    /* the four identical stores are redundant; kept as-is since this reads
       like a deliberate workload/test pattern */
    a[i]=(double)i/2.0;
    a[i]=(double)i/2.0;
    a[i]=(double)i/2.0;
    a[i]=(double)i/2.0;
  }
  /*default team, on all threads*/
#pragma omp single
  {
    /* FIX: omp_get_thread_num() is only declared when <omp.h> is included,
       which this file does only under _OPENMP; guard the call so the file
       also compiles (serially) without OpenMP support. */
#if defined(_OPENMP)
    j=omp_get_thread_num();
#else
    j=0;
#endif /* _OPENMP */
    printf("I am the single one: %d\n",j );
  }
}

int main(void)
{
#pragma omp parallel
  {
    init();
  }
  return 0;
}
BEIntegratorWave.h
/*! * @file BEIntegratorWave.h * @author Michal Merta * @author Jan Zapletal * @date December 12, 2013 * @brief Header file for class BEIntegratorWave * */ #ifndef BEINTEGRATORWAVE_H #define BEINTEGRATORWAVE_H #include <algorithm> #include "BEIntegrator.h" #ifndef EXPERIMENTAL_WAVE // use ordinary basis functions described in Sauter, Veit: Retarded time-domain... namespace bem4i { /*! * class for integrators for Sauter-Veit time domain BEM for wave equation * */ template<class LO, class SC> class BEIntegratorWave : public BEIntegrator<LO, SC, BEIntegratorWave<LO, SC> > { // to get inner type of complex numbers (for Helmholtz) typedef typename GetType<LO, SC>::SCVT SCVT; // we have to enable BEIntegrator to use kernel evaluation private methods friend class BEIntegrator<LO, SC, BEIntegratorWave<LO, SC> >; public: //! default constructor BEIntegratorWave( ); //! copy constructor BEIntegratorWave( const BEIntegratorWave& orig ); //! constructor taking BESpace as the argument BEIntegratorWave( BESpace<LO, SC>* space, int* quadratureOrder, int timeQuadOrder, int* quadratureOrderDisjointElems = nullptr, int nChebIntervals = 20, int nChebPoints = 20 ); //! destructor virtual ~BEIntegratorWave( ); //! returns element matrix of single layer potential void computeElemMatrix1Layer( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of double layer potential void computeElemMatrix2Layer( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of hypersingular operator void computeElemMatrixHypersingular( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of hypersingular operator void computeElemMatrixHypersingular( LO outerElem, LO innerElem, FullMatrix<LO, SC>& V, FullMatrix<LO, SC>& matrix ) const; //! 
returns element matrix of single layer potential for regular pairs void computeElemMatrixHypersingularDisjointP1P1( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! sets current time inline void setCurrentTime( SC time ) { currentTime = time; } //! returns current time inline SC getCurrentTime( ) { return currentTime; } //! sets current temporal basis (i) and test (j) functions inline void setCurrentFunctions( LO i, LO j ) { this->currBasisF = i; this->currTestF = j; } inline void setCurrentLegendreOrder( LO orderBasis, LO orderTest ) { this->currLegOrderBasis = orderBasis; this->currLegOrderTest = orderTest; computeChebyshevForHypersingular( ); computeChebyshevFor1Layer( ); } //! auxiliary function which generates right hand side void getDirichletRHS( Vector<LO, SC> &rhs ) const; //! auxiliary function which generates right hand side void getNeumannRHS( Vector<LO, SC> &rhs ) const; //! auxiliary function which generates right hand side void getNeumannRHS( Vector<LO, SC> &rhs, SC( *incWave ) ( SCVT, SCVT*, SCVT* ) ) const; //! 
evaluates i-th temporal basis function in time t multiplied by the Legendre polynomial of order order SCVT evalB( SCVT t, LO i, LO m = 0 ) const; /* * evaluates double layer potential in points x, stores values in * preallocated vector values * * @param[in] x pointer to array with evaluation points * @param[in] n number of evaluation points * @param[in] density Neumann data * @param[out] values preallocated vector for storing results */ void doubleLayerPotential( const SCVT *x, LO nPoints, SCVT t, const Vector<LO, SC> & density, Vector<LO, SC> & values ) const { doubleLayerPotentialP1( x, nPoints, t, density, values ); } /* * evaluates double layer potential in points x * for p1 density, * stores values in preallocated vector values * * @param[in] x pointer to array with evaluation points * @param[in] n number of evaluation points * @param[in] density discretized density function * @param[out] values preallocated vector for storing results */ void doubleLayerPotentialP1( const SCVT *x, LO n, SCVT t, const Vector<LO, SC> & density, Vector<LO, SC> & values ) const; protected: //! returns specific kernel evaluated in given points (x, y) SC evalDoubleLayerKernel( const SCVT *x, const SCVT *y, const SCVT* n ) const { // here we have to do some fancy integration! std::cout << "Not implemented!" << std::endl; return 0.0; }; //! evaluates Legendre polynomial SCVT evalLegendrePolynomial( SCVT t, int order = 0 ) const; //! evaluates the first derivative of Legendre polynomial SCVT evalLegendrePolynomialDot( SCVT t, int order = 0 ) const; //! evaluates the second derivative of Legendre polynomial SCVT evalLegendrePolynomialDDot( SCVT t, int order = 0 ) const; //! evaluates h_a,b function SCVT evalErf( SCVT t, SCVT a, SCVT b ) const; //! evaluates the i-th partition of unity bump function in time t SCVT evalPUM( SCVT t, LO i ) const; //! evaluates first time derivative of the h_a,b function SCVT evalErfDot( SCVT t, SCVT a, SCVT b ) const; //! 
//! evaluates second time derivative of the h_a,b function
SCVT evalErfDDot( SCVT t, SCVT a, SCVT b ) const;

//! evaluates first time derivative of the i-th PUM function
SCVT evalPUMDot( SCVT t, LO i ) const;

//! evaluates second time derivative of the i-th PUM function
SCVT evalPUMDDot( SCVT t, LO i ) const;

//! evaluates time derivative of the i-th temporal basis function
//! (multiplied by the Legendre polynomial of order m)
SCVT evalBDot( SCVT t, LO i, LO m = 0 ) const;

//! evaluates second time derivative of the i-th temporal basis function
//! (multiplied by the Legendre polynomial of order m)
SCVT evalBDDot( SCVT t, LO i, LO m = 0 ) const;

//! current time
SCVT currentTime;

//! current temporal test function
LO currTestF;

//! current temporal basis function
LO currBasisF;

//! current order of Legendre polynomial (basis)
LO currLegOrderBasis;

//! current order of Legendre polynomial (test)
LO currLegOrderTest;

//! time step
SCVT dt;

//! number of time steps
LO N;

//! order of temporal Gauss integration
int timeQuadOrder;

//! number of Chebyshev intervals for psi interpolation
int nChebIntervals;

//! number of Chebyshev points for psi interpolation
int nChebPoints;

//! width of one Chebyshev subinterval (used to locate the subinterval
//! containing a given distance by integer division)
double chebDelta;

//! beginnings of the Chebyshev interpolation intervals
//! (array of nChebIntervals + 1 entries — TODO confirm against allocation)
SCVT *chebIntervalStarts;

//! coefficients for Cheb. interpolation - pointers to arrays -
//! one for each Cheb. subinterval
SCVT **psiChebCoeffs;
SCVT **psiTildeChebCoeffs;
SCVT **psi1LayerChebCoeffs;

//! integrates the product of current test and basis function over the
//! given interval (single-layer variant)
SCVT integrate1Layer( SCVT start, SCVT end, SCVT r ) const;

//! integrates the product of current test and basis function over the
//! given interval (hypersingular, part 2)
SCVT integrateHypersingularPart2( SCVT start, SCVT end, SCVT r ) const;

//!
integrates the product of current test and basis function over given interval SCVT integrateHypersingularPart1( SCVT start, SCVT end, SCVT r ) const; /* * returns local matrix for Helmholtz hypersingular operator with p0p0 approximation * * @param[in] outerElem index of outer element * @param[in] innerElem index of inner element * @param[out] matrix preallocated local matrix */ void computeElemMatrixHypersingularSauterSchwabP1P1( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! computes coefficients of Chebyshev interpolation for efficient approximation of function psiddot, psidot void computeChebyshevForHypersingular( ); //! computes coefficients of Chebyshev interpolation for efficient approximation of function psiddot, psidot void computeChebyshevFor1Layer( ); //! computes zeros of Chebyshev interpolant over interval a, b void chebyshevZeros( SC a, SC b, SC *zeros ) const; /* * computes coefficients of the Chebyshev interpolant * * @param[in] fx values of a given function in Cheb. zeros * @param[out] coeffs coefficients of the Chebyshev interpolation */ void chebyshevCoefficients( SC * fx, SC *coeffs ) const; private: SC uIncNeu( SCVT t, SCVT * x, SCVT * n, int type ) const; //! computes Chebyshev interpolation in point t, inte SC chebyshevInterpolant( const SC a, const SC b, const SC * const coeffs, const SC t ) const; //! returns specific kernel evaluated in given points (x, y) SC evalSingleLayerKernel( const SCVT *x, const SCVT *y ) const { // here we have to do some fancy integration! 
SCVT norm = std::sqrt( ( x[0] - y[0] )*( x[0] - y[0] ) + ( x[1] - y[1] )*( x[1] - y[1] ) + ( x[2] - y[2] )*( x[2] - y[2] ) ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; if ( currTestF == 0 ) { testStart = 0.0; testEnd = dt; } else if ( currTestF == N - 1 ) { testStart = ( N - 2 ) * dt; testEnd = ( N - 1 ) * dt; } else { testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; } if ( currBasisF == 0 ) { basisStart = norm; basisEnd = dt + norm; } else if ( currBasisF == N - 1 ) { basisStart = ( N - 2 ) * dt + norm; basisEnd = ( N - 1 ) * dt + norm; } else { basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; } SCVT intStart = std::min( std::max( testStart, basisStart ), ( N - 1 ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a = 0; SC b = 0; int interval; for ( interval = 0; interval < nChebIntervals; interval++ ) { if ( norm > chebIntervalStarts[interval] && norm <= chebIntervalStarts[interval + 1] ) { a = chebIntervalStarts[interval]; b = chebIntervalStarts[interval + 1]; break; } } ///std::cout << chebyshevInterpolant(a, b, psiTildeChebCoeffs[interval-1], norm) << " , " <<integrateHypersingularPart1( intStart, intEnd, norm ) << std::endl; return ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psi1LayerChebCoeffs[interval], norm ) ); //return ( ( PI_FACT / norm ) * integrate1Layer( intStart, intEnd, norm ) ); }; SC evalSingleLayerKernel( SCVT x1, SCVT x2, SCVT x3, SCVT y1, SCVT y2, SCVT y3 ) const { // here we have to do some fancy integration! 
SCVT norm = std::sqrt( ( x1 - y1 )*( x1 - y1 ) + ( x2 - y2 )*( x2 - y2 ) + ( x3 - y3 )*( x3 - y3 ) ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; if ( currTestF == 0 ) { testStart = 0.0; testEnd = dt; } else if ( currTestF == N - 1 ) { testStart = ( N - 2 ) * dt; testEnd = ( N - 1 ) * dt; } else { testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; } if ( currBasisF == 0 ) { basisStart = norm; basisEnd = dt + norm; } else if ( currBasisF == N - 1 ) { basisStart = ( N - 2 ) * dt + norm; basisEnd = ( N - 1 ) * dt + norm; } else { basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; } SCVT intStart = std::min( std::max( testStart, basisStart ), ( N - 1 ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a = 0; SC b = 0; int interval; for ( interval = 0; interval < nChebIntervals; interval++ ) { if ( norm > chebIntervalStarts[interval] && norm <= chebIntervalStarts[interval + 1] ) { a = chebIntervalStarts[interval]; b = chebIntervalStarts[interval + 1]; break; } } ///std::cout << chebyshevInterpolant(a, b, psiTildeChebCoeffs[interval-1], norm) << " , " <<integrateHypersingularPart1( intStart, intEnd, norm ) << std::endl; return ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psi1LayerChebCoeffs[interval], norm ) ); //return ( ( PI_FACT / norm ) * integrate1Layer( intStart, intEnd, norm ) ); }; //! returns specific kernel evaluated in given points (x, y) // TODO evaluate laplace kernel separately and multiply later SC evalHypersingularPart1( const SCVT *x, const SCVT *y ) const { // here we have to do some fancy integration! 
SCVT norm = std::sqrt( ( x[0] - y[0] )*( x[0] - y[0] ) + ( x[1] - y[1] )*( x[1] - y[1] ) + ( x[2] - y[2] )*( x[2] - y[2] ) ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; if ( currTestF == 0 ) { testStart = 0.0; testEnd = dt; } else if ( currTestF == N - 1 ) { testStart = ( N - 2 ) * dt; testEnd = ( N - 1 ) * dt; } else { testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; } if ( currBasisF == 0 ) { basisStart = norm; basisEnd = dt + norm; } else if ( currBasisF == N - 1 ) { basisStart = ( N - 2 ) * dt + norm; basisEnd = ( N - 1 ) * dt + norm; } else { basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; } SCVT intStart = std::min( std::max( testStart, basisStart ), ( N - 1 ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a = 0; SC b = 0; int interval; for ( interval = 0; interval < nChebIntervals; interval++ ) { if ( norm > chebIntervalStarts[interval] && norm <= chebIntervalStarts[interval + 1] ) { a = chebIntervalStarts[interval]; b = chebIntervalStarts[interval + 1]; break; } } //std::cout << norm << std::endl; ///std::cout << chebyshevInterpolant(a, b, psiTildeChebCoeffs[interval-1], norm) << " , " <<integrateHypersingularPart1( intStart, intEnd, norm ) << std::endl; return ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psiTildeChebCoeffs[interval], norm ) ); //return( integrateHypersingularPart1( intStart, intEnd, norm ) ); //return ( ( PI_FACT / norm ) * integrateHypersingularPart1( intStart, intEnd, norm ) ); }; //#pragma omp declare simd simdlen( DATA_WIDTH ) SC evalHypersingularPart1( SCVT x1, SCVT x2, SCVT x3, SCVT y1, SCVT y2, SCVT y3 ) const { SCVT diff1 = x1 - y1; SCVT diff2 = x2 - y2; SCVT diff3 = x3 - y3; SCVT norm = std::sqrt( diff1 * diff1 + diff2 * diff2 + diff3 * diff3 ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; if ( currTestF == 0 
) { testStart = 0.0; testEnd = dt; } else if ( currTestF == N - 1 ) { testStart = ( N - 2 ) * dt; testEnd = ( N - 1 ) * dt; } else { testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; } if ( currBasisF == 0 ) { basisStart = norm; basisEnd = dt + norm; } else if ( currBasisF == N - 1 ) { basisStart = ( N - 2 ) * dt + norm; basisEnd = ( N - 1 ) * dt + norm; } else { basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; } SCVT intStart = std::min( std::max( testStart, basisStart ), ( N - 1 ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a = 0; SC b = 0; int interval = (int) ( ( norm - chebIntervalStarts[0] ) / chebDelta ); a = chebIntervalStarts[ interval ]; b = chebIntervalStarts[ interval + 1]; return ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psiTildeChebCoeffs[interval], norm ) ); }; //! returns specific kernel evaluated in given points (x, y) // TODO evaluate laplace kernel separately and multiply later SC evalHypersingularPart2( const SCVT *x, const SCVT *y ) const { // here we have to do some fancy integration! 
SCVT norm = std::sqrt( ( x[0] - y[0] )*( x[0] - y[0] ) + ( x[1] - y[1] )*( x[1] - y[1] ) + ( x[2] - y[2] )*( x[2] - y[2] ) ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; if ( currTestF == 0 ) { testStart = 0.0; testEnd = dt; } else if ( currTestF == N - 1 ) { testStart = ( N - 2 ) * dt; testEnd = ( N - 1 ) * dt; } else { testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; } if ( currBasisF == 0 ) { basisStart = norm; basisEnd = dt + norm; } else if ( currBasisF == N - 1 ) { basisStart = ( N - 2 ) * dt + norm; basisEnd = ( N - 1 ) * dt + norm; } else { basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; } SCVT intStart = std::min( std::max( testStart, basisStart ), ( N - 1 ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a = 0.0, b = 0.0; int interval; for ( interval = 0; interval < nChebIntervals; interval++ ) { if ( norm > chebIntervalStarts[interval] && norm <= chebIntervalStarts[interval + 1] ) { a = chebIntervalStarts[interval]; b = chebIntervalStarts[interval + 1]; break; } } //std::cout<< interval<<std::endl; return ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psiChebCoeffs[interval], norm ) ); //return ( ( PI_FACT / norm ) * integrateHypersingularPart2( intStart, intEnd, norm ) ); }; //#pragma omp declare simd simdlen( DATA_WIDTH ) SC evalHypersingularPart2( SCVT x1, SCVT x2, SCVT x3, SCVT y1, SCVT y2, SCVT y3 ) const { SCVT diff1 = x1 - y1; SCVT diff2 = x2 - y2; SCVT diff3 = x3 - y3; SCVT norm = std::sqrt( diff1 * diff1 + diff2 * diff2 + diff3 * diff3 ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; if ( currTestF == 0 ) { testStart = 0.0; testEnd = dt; } else if ( currTestF == N - 1 ) { testStart = ( N - 2 ) * dt; testEnd = ( N - 1 ) * dt; } else { testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; } if ( currBasisF == 0 ) { 
basisStart = norm; basisEnd = dt + norm; } else if ( currBasisF == N - 1 ) { basisStart = ( N - 2 ) * dt + norm; basisEnd = ( N - 1 ) * dt + norm; } else { basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; } SCVT intStart = std::min( std::max( testStart, basisStart ), ( N - 1 ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a = 0; SC b = 0; int interval = (int) ( ( norm - chebIntervalStarts[0] ) / chebDelta ); a = chebIntervalStarts[ interval ]; b = chebIntervalStarts[ interval + 1]; return ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psiChebCoeffs[interval], norm ) ); }; SC evalHypersingular( SCVT x1, SCVT x2, SCVT x3, SCVT y1, SCVT y2, SCVT y3, SCVT & kernel, SCVT & kernelDDot ) const { SCVT diff1 = x1 - y1; SCVT diff2 = x2 - y2; SCVT diff3 = x3 - y3; SCVT norm = std::sqrt( diff1 * diff1 + diff2 * diff2 + diff3 * diff3 ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; if ( currTestF == 0 ) { testStart = 0.0; testEnd = dt; } else if ( currTestF == N - 1 ) { testStart = ( N - 2 ) * dt; testEnd = ( N - 1 ) * dt; } else { testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; } if ( currBasisF == 0 ) { basisStart = norm; basisEnd = dt + norm; } else if ( currBasisF == N - 1 ) { basisStart = ( N - 2 ) * dt + norm; basisEnd = ( N - 1 ) * dt + norm; } else { basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; } SCVT intStart = std::min( std::max( testStart, basisStart ), ( N - 1 ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a = 0; SC b = 0; int interval = (int) ( ( norm - chebIntervalStarts[0] ) / chebDelta ); a = chebIntervalStarts[ interval ]; b = chebIntervalStarts[ interval + 1]; kernel = ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psiTildeChebCoeffs[interval], norm ) ); kernelDDot = ( ( PI_FACT / norm ) * 
chebyshevInterpolant( a, b, psiChebCoeffs[interval], norm ) ); }; }; } #else // use experimental basis functions which are not a partition of unity namespace bem4i { /*! * class for integrators for Sauter-Veit time domain BEM for wave equation * */ template<class LO, class SC> class BEIntegratorWave : public BEIntegrator<LO, SC, BEIntegratorWave<LO, SC> > { // to get inner type of complex numbers (for Helmholtz) typedef typename GetType<LO, SC>::SCVT SCVT; // we have to enable BEIntegrator to use kernel evaluation private methods friend class BEIntegrator<LO, SC, BEIntegratorWave<LO, SC> >; public: //! default constructor BEIntegratorWave( ); //! copy constructor BEIntegratorWave( const BEIntegratorWave& orig ); //! constructor taking BESpace as the argument BEIntegratorWave( BESpace<LO, SC>* space, int* quadratureOrder, int timeQuadOrder, int nPre = 0, int nPos = 3, int nChebIntervals = 20, int nChebPoints = 20 ); //! destructor virtual ~BEIntegratorWave( ); //! returns element matrix of single layer potential void computeElemMatrix1Layer( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of double layer potential void computeElemMatrix2Layer( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of hypersingular operator void computeElemMatrixHypersingular( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of hypersingular operator void computeElemMatrixHypersingular( LO outerElem, LO innerElem, FullMatrix<LO, SC>& V, FullMatrix<LO, SC>& matrix ) const; //! sets current time inline void setCurrentTime( SC time ) { currentTime = time; } //! returns current time inline SC getCurrentTime( ) { return currentTime; } //! sets current temporal basis (i) and test (j) functions inline void setCurrentFunctions( LO i, LO j ) { this->currBasisF = i; this->currTestF = j; computeChebyshevForHypersingular( ); //computeChebyshevFor1Layer( ); } //! 
auxiliary function which generates right hand side void getDirichletRHS( Vector<LO, SC> &rhs ) const; //! auxiliary function which generates right hand side void getNeumannRHS( Vector<LO, SC> &rhs ) const; protected: //! returns specific kernel evaluated in given points (x, y) SC evalSingleLayerKernel( const SCVT *x, const SCVT *y ) const { // here we have to do some fancy integration! SCVT norm = std::sqrt( ( x[0] - y[0] )*( x[0] - y[0] ) + ( x[1] - y[1] )*( x[1] - y[1] ) + ( x[2] - y[2] )*( x[2] - y[2] ) ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; if ( currTestF == 0 ) { testStart = 0.0; testEnd = dt; } else if ( currTestF == N - 1 ) { testStart = ( N - 2 ) * dt; testEnd = ( N - 1 ) * dt; } else { testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; } if ( currBasisF == 0 ) { basisStart = norm; basisEnd = dt + norm; } else if ( currBasisF == N - 1 ) { basisStart = ( N - 2 ) * dt + norm; basisEnd = ( N - 1 ) * dt + norm; } else { basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; } SCVT intStart = std::min( std::max( testStart, basisStart ), ( N - 1 ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } return ( ( PI_FACT / norm ) * integrate1Layer( intStart, intEnd, norm ) ); }; //! returns specific kernel evaluated in given points (x, y) // TODO evaluate laplace kernel separately and multiply later SC evalHypersingularPart1( const SCVT *x, const SCVT *y ) const { // here we have to do some fancy integration! 
SCVT norm = std::sqrt( ( x[0] - y[0] )*( x[0] - y[0] ) + ( x[1] - y[1] )*( x[1] - y[1] ) + ( x[2] - y[2] )*( x[2] - y[2] ) ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; SCVT intStart = std::min( std::max( testStart, basisStart ), ( N + nPos ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a, b; int interval; for ( interval = 0; interval < nChebIntervals; interval++ ) { if ( norm > chebIntervalStarts[interval] && norm < chebIntervalStarts[interval + 1] ) { a = chebIntervalStarts[interval]; b = chebIntervalStarts[interval + 1]; break; } } ///std::cout << chebyshevInterpolant(a, b, psiTildeChebCoeffs[interval-1], norm) << " , " <<integrateHypersingularPart1( intStart, intEnd, norm ) << std::endl; return ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psiTildeChebCoeffs[interval], norm ) ); //return( integrateHypersingularPart1( intStart, intEnd, norm ) ); //return ( ( PI_FACT / norm ) * integrateHypersingularPart1( intStart, intEnd, norm ) ); }; //! returns specific kernel evaluated in given points (x, y) // TODO evaluate laplace kernel separately and multiply later SC evalHypersingularPart2( const SCVT *x, const SCVT *y ) const { // here we have to do some fancy integration! 
SCVT norm = std::sqrt( ( x[0] - y[0] )*( x[0] - y[0] ) + ( x[1] - y[1] )*( x[1] - y[1] ) + ( x[2] - y[2] )*( x[2] - y[2] ) ); // check whether the supports are overlapping SCVT testStart, testEnd; SCVT basisStart, basisEnd; testStart = ( currTestF - 1 ) * dt; testEnd = ( currTestF + 1 ) * dt; basisStart = ( currBasisF - 1 ) * dt + norm; basisEnd = ( currBasisF + 1 ) * dt + norm; SCVT intStart = std::min( std::max( testStart, basisStart ), ( N + nPos ) * dt ); SCVT intEnd = std::min( basisEnd, testEnd ); if ( intStart >= intEnd ) { return 0.0; } SC a = 0.0, b = 0.0; int interval; for ( interval = 0; interval < nChebIntervals; interval++ ) { if ( norm > chebIntervalStarts[interval] && norm < chebIntervalStarts[interval + 1] ) { a = chebIntervalStarts[interval]; b = chebIntervalStarts[interval + 1]; break; } } //std::cout<< interval<<std::endl; return ( ( PI_FACT / norm ) * chebyshevInterpolant( a, b, psiChebCoeffs[interval], norm ) ); //return ( ( PI_FACT / norm ) * integrateHypersingularPart2( intStart, intEnd, norm ) ); }; //! returns specific kernel evaluated in given points (x, y) SC evalDoubleLayerKernel( const SCVT *x, const SCVT *y, const SCVT* n ) const { // here we have to do some fancy integration! std::cout << "Not implemented!" << std::endl; return 0.0; }; SCVT evalLegendrePolynomial( SCVT t, int order = 0 ) const { // for now we just have polynomial of 0-th order return 1.0; } //! evaluates h_a,b function SCVT evalErf( SCVT t, SCVT a, SCVT b ) const; //! evaluates the i-th partition of unity bump function in time t SCVT evalPUM( SCVT t, LO i ) const; //! evaluates i-th temporal basis function in time t multiplied by the Legendre polynomial of order order SCVT evalB( SCVT t, LO i, LO m = 0 ) const; //! evaluates first time derivative of the h_a,b function SCVT evalErfDot( SCVT t, SCVT a, SCVT b ) const; //! 
//! evaluates second time derivative of the h_a,b function
SCVT evalErfDDot( SCVT t, SCVT a, SCVT b ) const;

//! evaluates first time derivative of the i-th PUM function
SCVT evalPUMDot( SCVT t, LO i ) const;

//! evaluates second time derivative of the i-th PUM function
SCVT evalPUMDDot( SCVT t, LO i ) const;

//! evaluates time derivative of the i-th temporal basis function
//! (multiplied by the Legendre polynomial of order m)
SCVT evalBDot( SCVT t, LO i, LO m = 0 ) const;

//! evaluates second time derivative of the i-th temporal basis function
//! (multiplied by the Legendre polynomial of order m)
SCVT evalBDDot( SCVT t, LO i, LO m = 0 ) const;

//! current time
SCVT currentTime;

//! current temporal test function
LO currTestF;

//! current temporal basis function
LO currBasisF;

//! time step
SCVT dt;

//! number of time steps
LO N;

//! order of temporal Gauss integration
int timeQuadOrder;

//! number of Chebyshev intervals for psi interpolation
int nChebIntervals;

//! number of Chebyshev points for psi interpolation
int nChebPoints;

//! number of pre points (basis functions extended before t = 0)
int nPre;

//! number of post points (basis functions extended past t = T)
int nPos;

//! beginnings of the Chebyshev interpolation intervals
//! (array of nChebIntervals + 1 entries — TODO confirm against allocation)
SCVT *chebIntervalStarts;

//! coefficients for Cheb. interpolation - pointers to arrays -
//! one for each Cheb. subinterval
SCVT **psiChebCoeffs;
SCVT **psiTildeChebCoeffs;

//! integrates the product of current test and basis function over the
//! given interval (single-layer variant)
SCVT integrate1Layer( SCVT start, SCVT end, SCVT r ) const;

//! integrates the product of current test and basis function over the
//! given interval (hypersingular, part 2)
SCVT integrateHypersingularPart2( SCVT start, SCVT end, SCVT r ) const;

//!
integrates the product of current test and basis function over given interval SCVT integrateHypersingularPart1( SCVT start, SCVT end, SCVT r ) const; /* * returns local matrix for Helmholtz hypersingular operator with p0p0 approximation * * @param[in] outerElem index of outer element * @param[in] innerElem index of inner element * @param[out] matrix preallocated local matrix */ void computeElemMatrixHypersingularSauterSchwabP1P1( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! computes coefficients of Chebyshev interpolation for efficient approximation of function psiddot, psidot void computeChebyshevForHypersingular( ); //! computes zeros of Chebyshev interpolant over interval a, b void chebyshevZeros( SC a, SC b, SC *zeros ) const; /* * computes coefficients of the Chebyshev interpolant * * @param[in] fx values of a given function in Cheb. zeros * @param[out] coeffs coefficients of the Chebyshev interpolation */ void chebyshevCoefficients( SC *fx, SC *coeffs ) const; //! computes Chebyshev interpolation in point t, inte SC chebyshevInterpolant( SC a, SC b, SC *coeffs, SC t ) const; }; } #endif // include .cpp file to overcome linking problems due to templates #include "BEIntegratorWave.cpp" #endif /* BEINTEGRATORWAVE_H */
GB_binop__ne_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ne_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__ne_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__ne_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__ne_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint64) // A*D function (colscale): GB (_AxD__ne_uint64) // D*A function (rowscale): GB (_DxB__ne_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__ne_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__ne_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint64) // C=scalar+B GB (_bind1st__ne_uint64) // C=scalar+B' GB (_bind1st_tran__ne_uint64) // C=A+scalar GB (_bind2nd__ne_uint64) // C=A'+scalar GB (_bind2nd_tran__ne_uint64) // C type: bool // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_UINT64 || GxB_NO_NE_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ne_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ne_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ne_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ne_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
// (tail of the preceding column-scale kernel GB (_AxD__ne_uint64); its head
// lies before this chunk and is reproduced verbatim, unchanged)
(GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scale kernel for the NE_UINT64 operator: C(i,j) = (D(i,i) != B(i,j)).
// The loop body lives in the included template; this wrapper only binds the
// output array Cx.  Auto-generated file: do not edit by hand.
GrB_Info GB (_DxB__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Element-wise "add" (set union) with the NE_UINT64 operator.  All real work
// is in GB_add_template.c; the GB_WERK workspaces are freed by
// GB_FREE_WORKSPACE, which the template defines.
GrB_Info GB (_AaddB__ne_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // alpha/beta fill values are only consumed in eWiseUnion mode
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Element-wise multiply (set intersection), method 08: C sparse/hypersparse.
GrB_Info GB (_AemultB_08__ne_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Method 02: the flipxy handling below selects which operand order the
// template applies; GB_FLIPPED must be (re)defined before each inclusion.
GrB_Info GB (_AemultB_02__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Method 04: mask-driven iteration (M sparse/hyper, A and B bitmap/full).
GrB_Info GB (_AemultB_04__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap-output element-wise multiply.
GrB_Info GB (_AemultB_bitmap__ne_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x != Bx [p]) for all entries present per the bitmap Bb
// (GBB/GBX are the generic bitmap-test / value-access macros).
GrB_Info GB (_bind1st__ne_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st with the scalar bound to the second operand:
// Cx [p] = (Ax [p] != y).
GrB_Info GB (_bind2nd__ne_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x != aij) ;                      \
}

// Transpose A and apply z = (x != aij); the transpose machinery lives in
// GB_unop_transpose.c, which consumes GB_CAST_OP and GB_ATYPE.
GrB_Info GB (_bind1st_tran__ne_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij != y) ;                      \
}

// Transpose A and apply z = (aij != y); same template mechanism as above.
GrB_Info GB (_bind2nd_tran__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}

#endif
rawSHA384_fmt_plug.c
/*
 * This file is part of John the Ripper password cracker,
 * Copyright (c) 2010 by Solar Designer
 * based on rawMD4_fmt.c code, with trivial changes by groszek.
 *
 * Rewritten Spring 2013, JimF. SSE code added and released with the following terms:
 * No copyright is claimed, and the software is hereby placed in the public domain.
 * In case this attempt to disclaim copyright and place the software in the public
 * domain is deemed null and void, then the software is Copyright (c) 2011 JimF
 * and it is hereby released to the general public under the following
 * terms:
 *
 * This software may be modified, redistributed, and used for any
 * purpose, in source and binary forms, with or without modification.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA384;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA384);
#else

#include "arch.h"
#include "sha2.h"
#include "stdint.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"

//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512

/*
 * Only effective for SIMD.
 * Undef to disable reversing steps for benchmarking.
 */
#define REVERSE_STEPS

#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif

#include "simd-intrinsics.h"
#include "memdbg.h"

#define FORMAT_LABEL "Raw-SHA384"
#define FORMAT_NAME ""
#define FORMAT_TAG "$SHA384$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)

#ifdef SIMD_COEF_64
#define ALGORITHM_NAME SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1

#ifdef SIMD_COEF_64
#define PLAINTEXT_LENGTH 111
#else
#define PLAINTEXT_LENGTH 125
#endif
#define CIPHERTEXT_LENGTH 96

#define BINARY_SIZE DIGEST_SIZE
#define DIGEST_SIZE 48
#define DIGEST_SIZE_512 64
#define BINARY_ALIGN 8
#define SALT_SIZE 0
#define SALT_ALIGN 1

#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

/* self-test vectors: plain and $SHA384$-tagged ciphertexts */
static struct fmt_tests tests[] = {
	{"a8b64babd0aca91a59bdbb7761b421d4f2bb38280d3a75ba0f21f2bebc45583d446c598660c94ce680c47d19c30783a7", "password"},
	{"$SHA384$a8b64babd0aca91a59bdbb7761b421d4f2bb38280d3a75ba0f21f2bebc45583d446c598660c94ce680c47d19c30783a7", "password"},
	{"$SHA384$8cafed2235386cc5855e75f0d34f103ccc183912e5f02446b77c66539f776e4bf2bf87339b4518a7cb1c2441c568b0f8", "12345678"},
	{"$SHA384$38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b", ""},
	{"94e75dd8e1f16d7df761d76c021ad98c283791008b98368e891f411fc5aa1a83ef289e348abdecf5e1ba6971604a0cb0", "UPPERCASE"},
	{NULL}
};

#ifdef SIMD_COEF_64
/* byte offset of key byte i of candidate `index` inside the interleaved
   big-endian SIMD key buffer */
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
static ARCH_WORD_64 (*saved_key);
static ARCH_WORD_64 (*crypt_out);
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_64 (*crypt_out)[DIGEST_SIZE / sizeof(ARCH_WORD_64)];
#endif

/* Allocate key/output buffers, scaled for OpenMP thread count. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_64
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
#else
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt *
	                             SHA_BUF_SIZ, sizeof(*saved_key),
	                             MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt *
	                             DIGEST_SIZE_512 / sizeof(ARCH_WORD_64),
	                             sizeof(*crypt_out), MEM_ALIGN_SIMD);
#endif
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
#ifndef SIMD_COEF_64
	MEM_FREE(saved_len);
#endif
}

/* Accept an optional $SHA384$ tag followed by exactly 96 hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += 8;   /* NOTE(review): magic 8 == TAG_LENGTH; use the macro */

	q = p;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q && q - p == CIPHERTEXT_LENGTH;
}

/* Canonicalize: always prepend the tag and lowercase the hex digits. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(out + TAG_LENGTH);
	return out;
}

/* Decode the hex ciphertext to raw bytes; for SIMD builds also swap to
   big-endian 64-bit words and un-finalize ("reverse") the last steps so
   comparisons can be done against partially-computed hashes. */
void *get_binary(char *ciphertext)
{
	static ARCH_WORD_64 *outw;
	unsigned char *out;
	char *p;
	int i;

	if (!outw)
		outw = mem_calloc_tiny(DIGEST_SIZE, BINARY_ALIGN);

	out = (unsigned char*)outw;

	p = ciphertext + TAG_LENGTH;
	for (i = 0; i < DIGEST_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

#ifdef SIMD_COEF_64
	alter_endianity_to_BE64(out, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha384_reverse(outw);
#endif
#endif
	return out;
}

#ifdef SIMD_COEF_64
/* index of 64-bit word 3 of candidate `index` in the interleaved SIMD
   output layout (word 3 is the word the partial-hash masks apply to) */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64 + 3*SIMD_COEF_64)
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_out[index][3] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][3] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][3] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][3] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][3] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][3] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][3] & PH_MASK_6; }
#endif

/* binary_hash_* mirror get_hash_*: both hash word 3 of the digest */
static int binary_hash_0(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_6; }

/* Store one candidate key.  SIMD path: copy the key into the interleaved
   big-endian SHA-512 input block 8 bytes at a time, append the 0x80 padding
   byte right after the key, zero the rest of the block, and store the bit
   length in word 15.  Scalar path: plain bounded copy plus length. */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_64
#if ARCH_ALLOWS_UNALIGNED
	const ARCH_WORD_64 *wkey = (ARCH_WORD_64*)key;
#else
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint64_t));
	const ARCH_WORD_64 *wkey = is_aligned(key, sizeof(uint64_t)) ?
			(ARCH_WORD_64*)key : (ARCH_WORD_64*)strcpy(buf_aligned, key);
#endif
	ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64*)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
	ARCH_WORD_64 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_64 temp;

	len = 0;
	/* scan the key 8 bytes at a time; the first zero byte found inside a
	   word determines where the 0x80 padding byte is placed */
	while((unsigned char)(temp = *wkey++)) {
		if (!(temp & 0xff00)) {
			*keybuf_word = JOHNSWAP64((temp & 0xff) | (0x80 << 8));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffff) | (0x80 << 16));
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffff) | (0x80ULL << 24));
			len+=3;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000ULL)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffffff) | (0x80ULL << 32));
			len+=4;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000000000ULL)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffULL) | (0x80ULL << 40));
			len+=5;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000000000ULL)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffULL) | (0x80ULL << 48));
			len+=6;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000000000ULL)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffffULL) | (0x80ULL << 56));
			len+=7;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP64(temp);
		len += 8;
		keybuf_word += SIMD_COEF_64;
	}
	/* key length was a multiple of 8: padding byte starts a fresh word */
	*keybuf_word = 0x8000000000000000ULL;

key_cleaning:
	/* zero any stale words left over from a longer previous key */
	keybuf_word += SIMD_COEF_64;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_64;
	}
	keybuffer[15*SIMD_COEF_64] = len << 3;   /* message length in bits */
#else
	int len = strlen(key);
	saved_len[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_len[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
#endif
}

/* Recover the plaintext for `index`.  SIMD path: read the bit length back
   from word 15 and pull the bytes out of the interleaved buffer via GETPOS. */
static char *get_key(int index)
{
#ifdef SIMD_COEF_64
	unsigned i;
	ARCH_WORD_64 s;
	static char out[PLAINTEXT_LENGTH + 1];
	unsigned char *wucp = (unsigned char*)saved_key;

	s = ((ARCH_WORD_64*)saved_key)[15*SIMD_COEF_64 + (index&(SIMD_COEF_64-1)) + index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] >> 3;
	for(i = 0; i < (unsigned)s; i++)
		out[i] = wucp[ GETPOS(i, index) ];
	out[i] = 0;
	return (char*) out;
#else
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
#endif
}

#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif

/* Hash all stored keys; one SIMD batch (or one scalar key) per iteration. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_64
		SIMDSHA512body(&saved_key[index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64],
		               &crypt_out[index/SIMD_COEF_64*8*SIMD_COEF_64], NULL,
		               SSEi_REVERSE_STEPS|SSEi_MIXED_IN|SSEi_CRYPT_SHA384);
#else
		SHA512_CTX ctx;
		SHA384_Init(&ctx);
		SHA384_Update(&ctx, saved_key[index], saved_len[index]);
		SHA384_Final((unsigned char *)crypt_out[index], &ctx);
#endif
	}
	return count;
}

/* Quick scan: does any computed hash match this binary on one word? */
static int cmp_all(void *binary, int count)
{
	unsigned int index;

	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((ARCH_WORD_64*)binary)[3] == crypt_out[HASH_IDX])
#else
		if ( ((ARCH_WORD_64*)binary)[0] == crypt_out[index][0] )
#endif
			return 1;
	return 0;
}

/* Single-candidate one-word comparison (same word cmp_all checks). */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
	return ((ARCH_WORD_64*)binary)[3] == crypt_out[HASH_IDX];
#else
	return *(ARCH_WORD_64*)binary == crypt_out[index][0];
#endif
}

/* Full recomputation of the candidate's hash to rule out partial-hash
   collisions; applies the same endian-swap/reverse as get_binary(). */
static int cmp_exact(char *source, int index)
{
	ARCH_WORD_64 *binary = get_binary(source);
	char *key = get_key(index);
	SHA512_CTX ctx;
	ARCH_WORD_64 crypt_out[DIGEST_SIZE / sizeof(ARCH_WORD_64)];

	SHA384_Init(&ctx);
	SHA384_Update(&ctx, key, strlen(key));
	SHA384_Final((unsigned char*)crypt_out, &ctx);

#ifdef SIMD_COEF_64
	alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha384_reverse(crypt_out);
#endif
#endif
	return !memcmp(binary, crypt_out, DIGEST_SIZE);
}

struct fmt_main fmt_rawSHA384 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"SHA384 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unop__identity_int64_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB_unop_apply__identity_int64_uint8
// op(A') function: GB_unop_tran__identity_int64_uint8

// C type:   int64_t
// A type:   uint8_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint8_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = (int64_t) aij ;     \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (false here: uint8_t -> int64_t requires a widening cast)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the identity op (with uint8_t -> int64_t cast) to every entry of A.
// The bitmap Ab selects which entries exist when A is in bitmap form.
GrB_Info GB_unop_apply__identity_int64_uint8
(
    int64_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            // dense identity with no cast could be a single memcpy,
            // but that shortcut is compiled out for this type pair
            GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                uint8_t aij = Ax [p] ;
                int64_t z = (int64_t) aij ;
                Cx [p] = z ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose machinery lives in GB_unop_transpose.c, driven by the
// GB_CAST_OP / GB_ATYPE / GB_CTYPE macros defined above.
GrB_Info GB_unop_tran__identity_int64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}

#endif
lu.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - LU This benchmark is an OpenMP C version of the NPB LU code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: S. Weeratunga V. Venkatakrishnan E. Barszcz M. Yarrow OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" /* global variables */ #include "applu.h" #if defined(_OPENMP) /* for thread synchronization */ static boolean flag[ISIZ1/2*2+1]; #endif /* _OPENMP */ /* function declarations */ static void blts (int nx, int ny, int nz, int k, double omega, double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double ldz[ISIZ1][ISIZ2][5][5], double ldy[ISIZ1][ISIZ2][5][5], double ldx[ISIZ1][ISIZ2][5][5], double d[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ); static void buts(int nx, int ny, int nz, int k, double omega, double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double tv[ISIZ1][ISIZ2][5], double d[ISIZ1][ISIZ2][5][5], double udx[ISIZ1][ISIZ2][5][5], double udy[ISIZ1][ISIZ2][5][5], double udz[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ); static void domain(void); static void erhs(void); static void error(void); static void exact( int i, int j, int k, double u000ijk[5] 
); static void jacld(int k); static void jacu(int k); static void l2norm (int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double sum[5]); static void pintgr(void); static void read_input(void); static void rhs(void); static void setbv(void); static void setcoeff(void); static void setiv(void); static void ssor(void); static void verify(double xcr[5], double xce[5], double xci, char *cclass, boolean *verified); /*-------------------------------------------------------------------- program applu --------------------------------------------------------------------*/ int main(int argc, char **argv) { /*-------------------------------------------------------------------- c c driver for the performance evaluation of the solver for c five coupled parabolic/elliptic partial differential equations. c --------------------------------------------------------------------*/ char cclass; boolean verified; double mflops; int nthreads = 1; /*-------------------------------------------------------------------- c read input data --------------------------------------------------------------------*/ read_input(); /*-------------------------------------------------------------------- c set up domain sizes --------------------------------------------------------------------*/ domain(); /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ setcoeff(); #pragma omp parallel { /*-------------------------------------------------------------------- c set the boundary values for dependent variables --------------------------------------------------------------------*/ setbv(); /*-------------------------------------------------------------------- c set the initial values for dependent variables --------------------------------------------------------------------*/ setiv(); 
/*-------------------------------------------------------------------- c compute the forcing term based on prescribed exact solution --------------------------------------------------------------------*/ erhs(); #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /*-------------------------------------------------------------------- c perform the SSOR iterations --------------------------------------------------------------------*/ ssor(); /*-------------------------------------------------------------------- c compute the solution error --------------------------------------------------------------------*/ error(); /*-------------------------------------------------------------------- c compute the surface integral --------------------------------------------------------------------*/ pintgr(); /*-------------------------------------------------------------------- c verification test --------------------------------------------------------------------*/ verify ( rsdnm, errnm, frc, &cclass, &verified ); mflops = (double)itmax*(1984.77*(double)nx0 *(double)ny0 *(double)nz0 -10923.3*pow2((double)( nx0+ny0+nz0 )/3.0) +27770.9* (double)( nx0+ny0+nz0 )/3.0 -144010.0) / (maxtime*1000000.0); c_print_results("LU", cclass, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, "(none)"); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void blts (int nx, int ny, int nz, int k, double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double ldz[ISIZ1][ISIZ2][5][5], double ldy[ISIZ1][ISIZ2][5][5], double ldx[ISIZ1][ISIZ2][5][5], double d[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block lower triangular solution: c c v <-- ( L-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, m; double tmp, tmp1; double tmat[5][5]; #pragma omp for nowait schedule(static) for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { for (m = 0; m < 5; m++) { v[i][j][k][m] = v[i][j][k][m] - omega * ( ldz[i][j][m][0] * v[i][j][k-1][0] + ldz[i][j][m][1] * v[i][j][k-1][1] + ldz[i][j][m][2] * v[i][j][k-1][2] + ldz[i][j][m][3] * v[i][j][k-1][3] + ldz[i][j][m][4] * v[i][j][k-1][4] ); } } } #pragma omp for nowait schedule(static) for (i = ist; i <= iend; i++) { #if defined(_OPENMP) if (i != ist) { while (flag[i-1] == 0) { #pragma omp flush(flag) ; } } if (i != iend) { while (flag[i] == 1) { #pragma omp flush(flag) ; } } #endif /* _OPENMP */ for (j = jst; j <= jend; j++) { for (m = 0; m < 5; m++) { v[i][j][k][m] = v[i][j][k][m] - omega * ( ldy[i][j][m][0] * v[i][j-1][k][0] + ldx[i][j][m][0] * v[i-1][j][k][0] + ldy[i][j][m][1] * v[i][j-1][k][1] + ldx[i][j][m][1] * v[i-1][j][k][1] + ldy[i][j][m][2] * v[i][j-1][k][2] + ldx[i][j][m][2] * v[i-1][j][k][2] + ldy[i][j][m][3] * v[i][j-1][k][3] + ldx[i][j][m][3] * v[i-1][j][k][3] + ldy[i][j][m][4] * v[i][j-1][k][4] + ldx[i][j][m][4] * v[i-1][j][k][4] ); } /*-------------------------------------------------------------------- c diagonal block inversion c c forward elimination 
--------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp; tmp1 = 1.0 / tmat[ 1][1]; tmp = tmp1 * tmat[ 2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = 
tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ v[i][j][k][4] = v[i][j][k][4] / tmat[4][4]; v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4]; v[i][j][k][3] = v[i][j][k][3] / tmat[3][3]; v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4]; v[i][j][k][2] = v[i][j][k][2] / tmat[2][2]; v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4]; v[i][j][k][1] = v[i][j][k][1] / tmat[1][1]; v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4]; v[i][j][k][0] = v[i][j][k][0] / tmat[0][0]; } #if defined(_OPENMP) if (i != ist) flag[i-1] = 0; if (i != iend) flag[i] = 1; #pragma omp flush(flag) #endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void buts(int nx, int ny, int nz, int k, double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double tv[ISIZ1][ISIZ2][5], double d[ISIZ1][ISIZ2][5][5], double udx[ISIZ1][ISIZ2][5][5], double udy[ISIZ1][ISIZ2][5][5], double udz[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block upper triangular solution: c c v <-- ( U-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, m; double tmp, tmp1; double tmat[5][5]; #pragma omp for nowait schedule(static) for (i = iend; i >= ist; i--) { for (j = jend; j >= jst; j--) { for (m = 0; m < 5; m++) { tv[i][j][m] = omega * ( udz[i][j][m][0] * v[i][j][k+1][0] + udz[i][j][m][1] * v[i][j][k+1][1] + udz[i][j][m][2] * v[i][j][k+1][2] + udz[i][j][m][3] * v[i][j][k+1][3] + udz[i][j][m][4] * v[i][j][k+1][4] ); } } } #pragma omp for nowait schedule(static) for (i = iend; i >= ist; i--) { #if defined(_OPENMP) if (i != iend) { while (flag[i+1] == 0) { #pragma omp flush(flag) ; } } if (i != ist) { while (flag[i] == 1) { #pragma omp flush(flag) ; } } #endif /* _OPENMP */ for (j = jend; j >= jst; j--) { for (m = 0; m < 5; m++) { tv[i][j][m] = tv[i][j][m] + omega * ( udy[i][j][m][0] * v[i][j+1][k][0] + udx[i][j][m][0] * v[i+1][j][k][0] + udy[i][j][m][1] * v[i][j+1][k][1] + udx[i][j][m][1] * v[i+1][j][k][1] + udy[i][j][m][2] * v[i][j+1][k][2] + udx[i][j][m][2] * v[i+1][j][k][2] + udy[i][j][m][3] * v[i][j+1][k][3] + udx[i][j][m][3] * v[i+1][j][k][3] + udy[i][j][m][4] * v[i][j+1][k][4] + udx[i][j][m][4] * v[i+1][j][k][4] ); } /*-------------------------------------------------------------------- c diagonal block inversion --------------------------------------------------------------------*/ 
      /* copy the 5x5 diagonal block into tmat; it is destroyed by the
	 in-place Gaussian elimination below (no pivoting — the blocks
	 are diagonally dominant by construction) */
      for (m = 0; m < 5; m++) {
	tmat[m][0] = d[i][j][m][0];
	tmat[m][1] = d[i][j][m][1];
	tmat[m][2] = d[i][j][m][2];
	tmat[m][3] = d[i][j][m][3];
	tmat[m][4] = d[i][j][m][4];
      }

      /* forward elimination: eliminate column 0 from rows 1..4,
	 applying the same row operations to the RHS tv */
      tmp1 = 1.0 / tmat[0][0];
      tmp = tmp1 * tmat[1][0];
      tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
      tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
      tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
      tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
      tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp;

      tmp = tmp1 * tmat[2][0];
      tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
      tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
      tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
      tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
      tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp;

      tmp = tmp1 * tmat[3][0];
      tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
      tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
      tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
      tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
      tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp;

      tmp = tmp1 * tmat[4][0];
      tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
      tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
      tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
      tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
      tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp;

      /* eliminate column 1 from rows 2..4 */
      tmp1 = 1.0 / tmat[1][1];
      tmp = tmp1 * tmat[2][1];
      tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
      tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
      tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
      tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp;

      tmp = tmp1 * tmat[3][1];
      tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
      tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
      tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
      tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp;

      tmp = tmp1 * tmat[4][1];
      tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
      tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
      tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
      tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp;

      /* eliminate column 2 from rows 3..4 */
      tmp1 = 1.0 / tmat[2][2];
      tmp = tmp1 * tmat[3][2];
      tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
      tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
      tv[i][j][3] = tv[i][j][3] - tv[i][j][2]
	* tmp;
      tmp = tmp1 * tmat[4][2];
      tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
      tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
      tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp;

      /* eliminate column 3 from row 4 */
      tmp1 = 1.0 / tmat[3][3];
      tmp = tmp1 * tmat[4][3];
      tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
      tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp;

/*--------------------------------------------------------------------
c   back substitution
--------------------------------------------------------------------*/
      tv[i][j][4] = tv[i][j][4] / tmat[4][4];
      tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4];
      tv[i][j][3] = tv[i][j][3] / tmat[3][3];
      tv[i][j][2] = tv[i][j][2]
	- tmat[2][3] * tv[i][j][3]
	- tmat[2][4] * tv[i][j][4];
      tv[i][j][2] = tv[i][j][2] / tmat[2][2];
      tv[i][j][1] = tv[i][j][1]
	- tmat[1][2] * tv[i][j][2]
	- tmat[1][3] * tv[i][j][3]
	- tmat[1][4] * tv[i][j][4];
      tv[i][j][1] = tv[i][j][1] / tmat[1][1];
      tv[i][j][0] = tv[i][j][0]
	- tmat[0][1] * tv[i][j][1]
	- tmat[0][2] * tv[i][j][2]
	- tmat[0][3] * tv[i][j][3]
	- tmat[0][4] * tv[i][j][4];
      tv[i][j][0] = tv[i][j][0] / tmat[0][0];

      /* apply the correction: v <-- v - (U-inv) * rhs for this cell */
      v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
      v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
      v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
      v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
      v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
    }

#if defined(_OPENMP)
    /* publish row i and retire row i+1 so neighboring threads in the
       pipeline can proceed; flush makes the stores visible */
    if (i != iend) flag[i+1] = 0;
    if (i != ist) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Copy the global problem size into (nx,ny,nz), validate it against
   the 4..ISIZ* limits, and set the interior extents ist..iend and
   jst..jend (one boundary cell excluded on each side).  Exits the
   program on an invalid size. */
static void domain(void) {

/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  nx = nx0;
  ny = ny0;
  nz = nz0;

/*--------------------------------------------------------------------
c   check the sub-domain size
--------------------------------------------------------------------*/
  if ( nx < 4 || ny < 4 || nz < 4 ) {
    printf(" SUBDOMAIN SIZE IS TOO SMALL - \n"
	   " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
	   " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n"
	   " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
    exit(1);
  }

  if ( nx > ISIZ1 || ny > ISIZ2 || nz > ISIZ3 ) {
    printf(" SUBDOMAIN SIZE IS TOO LARGE - \n"
	   " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
	   " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n"
	   " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY.  THEY ARE\n"
	   " CURRENTLY%4d%4d%4d\n", nx, ny, nz);
    exit(1);
  }

/*--------------------------------------------------------------------
c   set up the start and end in i and j extents for all processors
--------------------------------------------------------------------*/
  ist = 1;
  iend = nx - 2;
  jst = 1;
  jend = ny - 2;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void erhs(void) {

/*--------------------------------------------------------------------
c
c   compute the right hand side based on exact solution
c
c   Evaluates the forcing term frct from the polynomial exact
c   solution (coefficients ce): first the solution itself is built
c   into rsd, then flux differences and fourth-order dissipation are
c   accumulated in the xi, eta and zeta directions.
c
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int iglob, jglob;
  int L1, L2;
  int ist1, iend1;
  int jst1, jend1;
  double  dsspm;
  double  xi, eta, zeta;
  double  q;
  double  u21, u31, u41;
  double  tmp;
  double  u21i, u31i, u41i, u51i;
  double  u21j, u31j, u41j, u51j;
  double  u21k, u31k, u41k, u51k;
  double  u21im1, u31im1, u41im1, u51im1;
  double  u21jm1, u31jm1, u41jm1, u51jm1;
  double  u21km1, u31km1, u41km1, u51km1;

  dsspm = dssp;	/* dissipation coefficient, same as the solver's dssp */

  /* clear the forcing array */
#pragma omp for
  for (i = 0; i < nx; i++) {
    for (j = 0; j < ny; j++) {
      for (k = 0; k < nz; k++) {
	for (m = 0; m < 5; m++) {
	  frct[i][j][k][m] = 0.0;
	}
      }
    }
  }

  /* fill rsd with the exact polynomial solution at each grid point
     (same polynomial in ce[][] as the exact() routine) */
#pragma omp for
  for (i = 0; i < nx; i++) {
    iglob = i;
    xi = ( (double)(iglob) ) / ( nx0 - 1 );
    for (j = 0; j < ny; j++) {
      jglob = j;
      eta = ( (double)(jglob) ) / ( ny0 - 1 );
      for (k = 0; k < nz; k++) {
	zeta
= ( (double)(k) ) / ( nz - 1 ); for (m = 0; m < 5; m++) { rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx-1; #pragma omp for for (i = L1; i <= L2; i++) { for (j = jst; j <= jend; j++) { for (k = 1; k < nz - 1; k++) { flux[i][j][k][0] = rsd[i][j][k][1]; u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * ( rsd[i][j][k][4] - q ); flux[i][j][k][2] = rsd[i][j][k][2] * u21; flux[i][j][k][3] = rsd[i][j][k][3] * u21; flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21; } } } #pragma omp for for (j = jst; j <= jend; j++) { for (k = 1; k <= nz - 2; k++) { for (i = ist; i <= iend; i++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] ); } } for (i = ist; i <= L2; i++) { tmp = 1.0 / rsd[i][j][k][0]; u21i = tmp * rsd[i][j][k][1]; u31i = tmp * rsd[i][j][k][2]; u41i = tmp * rsd[i][j][k][3]; u51i = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i-1][j][k][0]; u21im1 = tmp * rsd[i-1][j][k][1]; u31im1 = tmp * rsd[i-1][j][k][2]; u41im1 = tmp * rsd[i-1][j][k][3]; u51im1 = tmp * rsd[i-1][j][k][4]; flux[i][j][k][1] = (4.0/3.0) * tx3 * ( u21i - u21im1 ); flux[i][j][k][2] = tx3 * ( u31i - u31im1 ); flux[i][j][k][3] = tx3 * ( u41i - u41im1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i ) - ( u21im1*u21im1 + 
u31im1*u31im1 + u41im1*u41im1 ) ) + (1.0/6.0) * tx3 * ( u21i*u21i - u21im1*u21im1 ) + C1 * C5 * tx3 * ( u51i - u51im1 ); } for (i = ist; i <= iend; i++) { frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * ( rsd[i-1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i+1][j][k][0] ); frct[i][j][k][1] = frct[i][j][k][1] + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] ) + dx2 * tx1 * ( rsd[i-1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i+1][j][k][1] ); frct[i][j][k][2] = frct[i][j][k][2] + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] ) + dx3 * tx1 * ( rsd[i-1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i+1][j][k][2] ); frct[i][j][k][3] = frct[i][j][k][3] + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] ) + dx4 * tx1 * ( rsd[i-1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i+1][j][k][3] ); frct[i][j][k][4] = frct[i][j][k][4] + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] ) + dx5 * tx1 * ( rsd[i-1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i+1][j][k][4] ); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { frct[1][j][k][m] = frct[1][j][k][m] - dsspm * ( + 5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m] ); frct[2][j][k][m] = frct[2][j][k][m] - dsspm * ( - 4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m] ); } ist1 = 3; iend1 = nx - 4; for (i = ist1; i <=iend1; i++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * ( rsd[i-2][j][k][m] - 4.0 * rsd[i-1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i+1][j][k][m] + rsd[i+2][j][k][m] ); } } for (m = 0; m < 5; m++) { frct[nx-3][j][k][m] = frct[nx-3][j][k][m] - dsspm * ( rsd[nx-5][j][k][m] - 4.0 * rsd[nx-4][j][k][m] + 6.0 * rsd[nx-3][j][k][m] - 4.0 * rsd[nx-2][j][k][m] ); frct[nx-2][j][k][m] = frct[nx-2][j][k][m] - dsspm * ( rsd[nx-4][j][k][m] - 4.0 * rsd[nx-3][j][k][m] + 5.0 * 
rsd[nx-2][j][k][m] ); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny-1; #pragma omp for for (i = ist; i <= iend; i++) { for (j = L1; j <= L2; j++) { for (k = 1; k <= nz - 2; k++) { flux[i][j][k][0] = rsd[i][j][k][2]; u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u31; flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * ( rsd[i][j][k][4] - q ); flux[i][j][k][3] = rsd[i][j][k][3] * u31; flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31; } } } #pragma omp for for (i = ist; i <= iend; i++) { for (k = 1; k <= nz - 2; k++) { for (j = jst; j <= jend; j++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] ); } } for (j = jst; j <= L2; j++) { tmp = 1.0 / rsd[i][j][k][0]; u21j = tmp * rsd[i][j][k][1]; u31j = tmp * rsd[i][j][k][2]; u41j = tmp * rsd[i][j][k][3]; u51j = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j-1][k][0]; u21jm1 = tmp * rsd[i][j-1][k][1]; u31jm1 = tmp * rsd[i][j-1][k][2]; u41jm1 = tmp * rsd[i][j-1][k][3]; u51jm1 = tmp * rsd[i][j-1][k][4]; flux[i][j][k][1] = ty3 * ( u21j - u21jm1 ); flux[i][j][k][2] = (4.0/3.0) * ty3 * ( u31j - u31jm1 ); flux[i][j][k][3] = ty3 * ( u41j - u41jm1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j ) - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) ) + (1.0/6.0) * ty3 * ( u31j*u31j - u31jm1*u31jm1 ) + C1 * C5 * ty3 * ( u51j - u51jm1 ); } for (j = jst; j <= jend; j++) { frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * ( rsd[i][j-1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j+1][k][0] ); frct[i][j][k][1] = frct[i][j][k][1] + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] ) + dy2 * ty1 * ( 
rsd[i][j-1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j+1][k][1] ); frct[i][j][k][2] = frct[i][j][k][2] + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] ) + dy3 * ty1 * ( rsd[i][j-1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j+1][k][2] ); frct[i][j][k][3] = frct[i][j][k][3] + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] ) + dy4 * ty1 * ( rsd[i][j-1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j+1][k][3] ); frct[i][j][k][4] = frct[i][j][k][4] + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] ) + dy5 * ty1 * ( rsd[i][j-1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j+1][k][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { frct[i][1][k][m] = frct[i][1][k][m] - dsspm * ( + 5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m] ); frct[i][2][k][m] = frct[i][2][k][m] - dsspm * ( - 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m] ); } jst1 = 3; jend1 = ny - 4; for (j = jst1; j <= jend1; j++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * ( rsd[i][j-2][k][m] - 4.0 * rsd[i][j-1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j+1][k][m] + rsd[i][j+2][k][m] ); } } for (m = 0; m < 5; m++) { frct[i][ny-3][k][m] = frct[i][ny-3][k][m] - dsspm * ( rsd[i][ny-5][k][m] - 4.0 * rsd[i][ny-4][k][m] + 6.0 * rsd[i][ny-3][k][m] - 4.0 * rsd[i][ny-2][k][m] ); frct[i][ny-2][k][m] = frct[i][ny-2][k][m] - dsspm * ( rsd[i][ny-4][k][m] - 4.0 * rsd[i][ny-3][k][m] + 5.0 * rsd[i][ny-2][k][m] ); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp for for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { for (k = 0; k <= nz-1; k++) { flux[i][j][k][0] = rsd[i][j][k][3]; u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; q = 0.50 
* ( rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u41; flux[i][j][k][2] = rsd[i][j][k][2] * u41; flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 * ( rsd[i][j][k][4] - q ); flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41; } for (k = 1; k <= nz - 2; k++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] ); } } for (k = 1; k <= nz-1; k++) { tmp = 1.0 / rsd[i][j][k][0]; u21k = tmp * rsd[i][j][k][1]; u31k = tmp * rsd[i][j][k][2]; u41k = tmp * rsd[i][j][k][3]; u51k = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j][k-1][0]; u21km1 = tmp * rsd[i][j][k-1][1]; u31km1 = tmp * rsd[i][j][k-1][2]; u41km1 = tmp * rsd[i][j][k-1][3]; u51km1 = tmp * rsd[i][j][k-1][4]; flux[i][j][k][1] = tz3 * ( u21k - u21km1 ); flux[i][j][k][2] = tz3 * ( u31k - u31km1 ); flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k - u41km1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k ) - ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) ) + (1.0/6.0) * tz3 * ( u41k*u41k - u41km1*u41km1 ) + C1 * C5 * tz3 * ( u51k - u51km1 ); } for (k = 1; k <= nz - 2; k++) { frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * ( rsd[i][j][k+1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k-1][0] ); frct[i][j][k][1] = frct[i][j][k][1] + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] ) + dz2 * tz1 * ( rsd[i][j][k+1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k-1][1] ); frct[i][j][k][2] = frct[i][j][k][2] + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] ) + dz3 * tz1 * ( rsd[i][j][k+1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k-1][2] ); frct[i][j][k][3] = frct[i][j][k][3] + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] ) + dz4 * tz1 * ( rsd[i][j][k+1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k-1][3] ); frct[i][j][k][4] = frct[i][j][k][4] + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - 
flux[i][j][k][4] ) + dz5 * tz1 * ( rsd[i][j][k+1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k-1][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { frct[i][j][1][m] = frct[i][j][1][m] - dsspm * ( + 5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m] ); frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m] ); } for (k = 3; k <= nz - 4; k++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * ( rsd[i][j][k-2][m] - 4.0 * rsd[i][j][k-1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k+1][m] + rsd[i][j][k+2][m] ); } } for (m = 0; m < 5; m++) { frct[i][j][nz-3][m] = frct[i][j][nz-3][m] - dsspm * ( rsd[i][j][nz-5][m] - 4.0 * rsd[i][j][nz-4][m] + 6.0 * rsd[i][j][nz-3][m] - 4.0 * rsd[i][j][nz-2][m] ); frct[i][j][nz-2][m] = frct[i][j][nz-2][m] - dsspm * ( rsd[i][j][nz-4][m] - 4.0 * rsd[i][j][nz-3][m] + 5.0 * rsd[i][j][nz-2][m] ); } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void error(void) { /*-------------------------------------------------------------------- c c compute the solution error c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, k, m; int iglob, jglob; double tmp; double u000ijk[5]; for (m = 0; m < 5; m++) { errnm[m] = 0.0; } for (i = ist; i <= iend; i++) { iglob = i; for (j = jst; j <= jend; j++) { jglob = j; for (k = 1; k <= nz-2; k++) { exact( iglob, jglob, k, u000ijk ); for (m = 0; m < 5; m++) { tmp = ( u000ijk[m] - u[i][j][k][m] ); errnm[m] = errnm[m] + tmp *tmp; } } } } for (m = 0; m < 5; m++) { 
    errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact( int i, int j, int k,
		   double u000ijk[5] ) {

/*--------------------------------------------------------------------
c
c   compute the exact solution at (i,j,k)
c
c   Writes the 5 components of the analytic polynomial solution into
c   u000ijk.  The polynomial is cubic/quartic in the normalized
c   coordinates xi, eta, zeta with coefficients from the global ce.
c
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int m;
  double xi, eta, zeta;

  xi   = ((double)i) / (nx0 - 1);
  eta  = ((double)j) / (ny0 - 1);
  /* NOTE(review): zeta is normalized by nz, not nz0, unlike xi/eta;
     this matches the reference code (no decomposition in k), so
     nz == nz0 here — verify if the domain setup ever changes. */
  zeta = ((double)k) / (nz - 1);

  for (m = 0; m < 5; m++) {
    u000ijk[m] =  ce[m][0]
      + ce[m][1] * xi
      + ce[m][2] * eta
      + ce[m][3] * zeta
      + ce[m][4] * xi * xi
      + ce[m][5] * eta * eta
      + ce[m][6] * zeta * zeta
      + ce[m][7] * xi * xi * xi
      + ce[m][8] * eta * eta * eta
      + ce[m][9] * zeta * zeta * zeta
      + ce[m][10] * xi * xi * xi * xi
      + ce[m][11] * eta * eta * eta * eta
      + ce[m][12] * zeta * zeta * zeta * zeta;
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k) {

/*--------------------------------------------------------------------
c   compute the lower triangular part of the jacobian matrix
c
c   Fills the block-diagonal d and the sub-diagonal blocks a (k-1),
c   b (j-1), c (i-1) for plane k, later consumed by blts.
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j;
  double r43;
  double c1345;
  double c34;
  double tmp1, tmp2, tmp3;

  r43 = ( 4.0 / 3.0 );
  c1345 = C1 * C3 * C4 * C5;
  c34 = C3 * C4;

#pragma omp for nowait schedule(static)
  for (i = ist; i <= iend; i++) {
    for (j = jst; j <= jend; j++) {

/*--------------------------------------------------------------------
c   form the block diagonal
--------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * ( tx1 * dx1 + ty1 * dy1 + tz1 * dz1 ); d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 0.0; d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][1] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) ); d[i][j][1][1] = 1.0 + dt * 2.0 * ( tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx2 + ty1 * dy2 + tz1 * dz2 ); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] ) + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) ); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx3 + ty1 * dy3 + tz1 * dz3 ); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][3] ) + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) ); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx4 + ty1 * dy4 + tz1 * dz4 ); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( r43*c34 - c1345 ) * tmp3 * ( 
pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) ); d[i][j][4][1] = dt * 2.0 * ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] ); d[i][j][4][2] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] ); d[i][j][4][3] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] ); d[i][j][4][4] = 1.0 + dt * 2.0 * ( tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1 ) + dt * 2.0 * ( tx1 * dx5 + ty1 * dy5 + tz1 * dz5 ); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k-1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = - dt * tz1 * dz1; a[i][j][0][1] = 0.0; a[i][j][0][2] = 0.0; a[i][j][0][3] = - dt * tz2; a[i][j][0][4] = 0.0; a[i][j][1][0] = - dt * tz2 * ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] ); a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 ) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2 ; a[i][j][1][2] = 0.0; a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 ); a[i][j][1][4] = 0.0; a[i][j][2][0] = - dt * tz2 * ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] ); a[i][j][2][1] = 0.0; a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 ) - dt * tz1 * ( c34 * tmp1 ) - dt * tz1 * dz3; a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 ); a[i][j][2][4] = 0.0; a[i][j][3][0] = - dt * tz2 * ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 ) + 0.50 * C2 * ( ( u[i][j][k-1][1] * u[i][j][k-1][1] + u[i][j][k-1][2] * u[i][j][k-1][2] + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) ) - dt * tz1 * 
( - r43 * c34 * tmp2 * u[i][j][k-1][3] ); a[i][j][3][1] = - dt * tz2 * ( - C2 * ( u[i][j][k-1][1] * tmp1 ) ); a[i][j][3][2] = - dt * tz2 * ( - C2 * ( u[i][j][k-1][2] * tmp1 ) ); a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 ) * ( u[i][j][k-1][3] * tmp1 ) - dt * tz1 * ( r43 * c34 * tmp1 ) - dt * tz1 * dz4; a[i][j][3][4] = - dt * tz2 * C2; a[i][j][4][0] = - dt * tz2 * ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1] + u[i][j][k-1][2] * u[i][j][k-1][2] + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 - C1 * ( u[i][j][k-1][4] * tmp1 ) ) * ( u[i][j][k-1][3] * tmp1 ) ) - dt * tz1 * ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1]) - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2]) - ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3]) - c1345 * tmp2 * u[i][j][k-1][4] ); a[i][j][4][1] = - dt * tz2 * ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1]; a[i][j][4][2] = - dt * tz2 * ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2]; a[i][j][4][3] = - dt * tz2 * ( C1 * ( u[i][j][k-1][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j][k-1][1]*u[i][j][k-1][1] + u[i][j][k-1][2]*u[i][j][k-1][2] + 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) ) - dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3]; a[i][j][4][4] = - dt * tz2 * ( C1 * ( u[i][j][k-1][3] * tmp1 ) ) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j-1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = - dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = - dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = - dt * ty2 * ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] ); b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 ) - dt * 
ty1 * ( c34 * tmp1 ) - dt * ty1 * dy2; b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 ); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = - dt * ty2 * ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 ) + 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1] + u[i][j-1][k][2] * u[i][j-1][k][2] + u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2 ) ) - dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] ); b[i][j][2][1] = - dt * ty2 * ( - C2 * ( u[i][j-1][k][1] * tmp1 ) ); b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 ) * ( u[i][j-1][k][2] * tmp1 ) ) - dt * ty1 * ( r43 * c34 * tmp1 ) - dt * ty1 * dy3; b[i][j][2][3] = - dt * ty2 * ( - C2 * ( u[i][j-1][k][3] * tmp1 ) ); b[i][j][2][4] = - dt * ty2 * C2; b[i][j][3][0] = - dt * ty2 * ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] ); b[i][j][3][1] = 0.0; b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 ); b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = - dt * ty2 * ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1] + u[i][j-1][k][2] * u[i][j-1][k][2] + u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2 - C1 * ( u[i][j-1][k][4] * tmp1 ) ) * ( u[i][j-1][k][2] * tmp1 ) ) - dt * ty1 * ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1])) - ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2])) - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3])) - c1345*tmp2*u[i][j-1][k][4] ); b[i][j][4][1] = - dt * ty2 * ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1]; b[i][j][4][2] = - dt * ty2 * ( C1 * ( u[i][j-1][k][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j-1][k][1]*u[i][j-1][k][1] + 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2] + u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) ) - dt * ty1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2]; b[i][j][4][3] = - dt * ty2 * ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3]; 
b[i][j][4][4] = - dt * ty2 * ( C1 * ( u[i][j-1][k][2] * tmp1 ) ) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i-1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = - dt * tx1 * dx1; c[i][j][0][1] = - dt * tx2; c[i][j][0][2] = 0.0; c[i][j][0][3] = 0.0; c[i][j][0][4] = 0.0; c[i][j][1][0] = - dt * tx2 * ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 ) + C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1] + u[i-1][j][k][2] * u[i-1][j][k][2] + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] ); c[i][j][1][1] = - dt * tx2 * ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) ) - dt * tx1 * ( r43 * c34 * tmp1 ) - dt * tx1 * dx2; c[i][j][1][2] = - dt * tx2 * ( - C2 * ( u[i-1][j][k][2] * tmp1 ) ); c[i][j][1][3] = - dt * tx2 * ( - C2 * ( u[i-1][j][k][3] * tmp1 ) ); c[i][j][1][4] = - dt * tx2 * C2; c[i][j][2][0] = - dt * tx2 * ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] ); c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 ); c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx3; c[i][j][2][3] = 0.0; c[i][j][2][4] = 0.0; c[i][j][3][0] = - dt * tx2 * ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] ); c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 ); c[i][j][3][2] = 0.0; c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx4; c[i][j][3][4] = 0.0; c[i][j][4][0] = - dt * tx2 * ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1] + u[i-1][j][k][2] * u[i-1][j][k][2] + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 - C1 * ( u[i-1][j][k][4] * tmp1 ) ) * ( u[i-1][j][k][1] * tmp1 ) ) - dt * tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( 
pow2(u[i-1][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) ) - c1345 * tmp2 * u[i-1][j][k][4] ); c[i][j][4][1] = - dt * tx2 * ( C1 * ( u[i-1][j][k][4] * tmp1 ) - 0.50 * C2 * ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1] + u[i-1][j][k][2]*u[i-1][j][k][2] + u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) ) - dt * tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1]; c[i][j][4][2] = - dt * tx2 * ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2]; c[i][j][4][3] = - dt * tx2 * ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3]; c[i][j][4][4] = - dt * tx2 * ( C1 * ( u[i-1][j][k][1] * tmp1 ) ) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void jacu(int k) { /*-------------------------------------------------------------------- c compute the upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j; double r43; double c1345; double c34; double tmp1, tmp2, tmp3; r43 = ( 4.0 / 3.0 ); c1345 = C1 * C3 * C4 * C5; c34 = C3 * C4; #pragma omp for nowait schedule(static) #if defined(_OPENMP) for (i = iend; i >= ist; i--) { for (j = jend; j >= jst; j--) { #else for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { #endif /*-------------------------------------------------------------------- c form the block daigonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * ( tx1 * dx1 + ty1 * dy1 + tz1 * dz1 ); 
d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 0.0; d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][1] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) ); d[i][j][1][1] = 1.0 + dt * 2.0 * ( tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx2 + ty1 * dy2 + tz1 * dz2 ); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] ) + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) ); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx3 + ty1 * dy3 + tz1 * dz3 ); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][3] ) + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) ); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx4 + ty1 * dy4 + tz1 * dz4 ); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) ); d[i][j][4][1] = dt * 2.0 * ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] + tz1 * ( c34 - c1345 ) * tmp2 * 
u[i][j][k][1] ); d[i][j][4][2] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] ); d[i][j][4][3] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] ); d[i][j][4][4] = 1.0 + dt * 2.0 * ( tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1 ) + dt * 2.0 * ( tx1 * dx5 + ty1 * dy5 + tz1 * dz5 ); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i+1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = - dt * tx1 * dx1; a[i][j][0][1] = dt * tx2; a[i][j][0][2] = 0.0; a[i][j][0][3] = 0.0; a[i][j][0][4] = 0.0; a[i][j][1][0] = dt * tx2 * ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 ) + C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1] + u[i+1][j][k][2] * u[i+1][j][k][2] + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] ); a[i][j][1][1] = dt * tx2 * ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * ( r43 * c34 * tmp1 ) - dt * tx1 * dx2; a[i][j][1][2] = dt * tx2 * ( - C2 * ( u[i+1][j][k][2] * tmp1 ) ); a[i][j][1][3] = dt * tx2 * ( - C2 * ( u[i+1][j][k][3] * tmp1 ) ); a[i][j][1][4] = dt * tx2 * C2 ; a[i][j][2][0] = dt * tx2 * ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] ); a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 ); a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx3; a[i][j][2][3] = 0.0; a[i][j][2][4] = 0.0; a[i][j][3][0] = dt * tx2 * ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] ); a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 ); a[i][j][3][2] = 0.0; a[i][j][3][3] = dt * tx2 
* ( u[i+1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx4; a[i][j][3][4] = 0.0; a[i][j][4][0] = dt * tx2 * ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1] + u[i+1][j][k][2] * u[i+1][j][k][2] + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 - C1 * ( u[i+1][j][k][4] * tmp1 ) ) * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) ) - c1345 * tmp2 * u[i+1][j][k][4] ); a[i][j][4][1] = dt * tx2 * ( C1 * ( u[i+1][j][k][4] * tmp1 ) - 0.50 * C2 * ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1] + u[i+1][j][k][2]*u[i+1][j][k][2] + u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) ) - dt * tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1]; a[i][j][4][2] = dt * tx2 * ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2]; a[i][j][4][3] = dt * tx2 * ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3]; a[i][j][4][4] = dt * tx2 * ( C1 * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j+1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = - dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = dt * ty2 * ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] ); b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy2; b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 ); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = dt * ty2 * ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 ) + 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1] + 
u[i][j+1][k][2] * u[i][j+1][k][2] + u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2 ) ) - dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] ); b[i][j][2][1] = dt * ty2 * ( - C2 * ( u[i][j+1][k][1] * tmp1 ) ); b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 ) * ( u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * ( r43 * c34 * tmp1 ) - dt * ty1 * dy3; b[i][j][2][3] = dt * ty2 * ( - C2 * ( u[i][j+1][k][3] * tmp1 ) ); b[i][j][2][4] = dt * ty2 * C2; b[i][j][3][0] = dt * ty2 * ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] ); b[i][j][3][1] = 0.0; b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 ); b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = dt * ty2 * ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1] + u[i][j+1][k][2] * u[i][j+1][k][2] + u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2 - C1 * ( u[i][j+1][k][4] * tmp1 ) ) * ( u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) ) - ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) ) - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) ) - c1345*tmp2*u[i][j+1][k][4] ); b[i][j][4][1] = dt * ty2 * ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1]; b[i][j][4][2] = dt * ty2 * ( C1 * ( u[i][j+1][k][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j+1][k][1]*u[i][j+1][k][1] + 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2] + u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) ) - dt * ty1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2]; b[i][j][4][3] = dt * ty2 * ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3]; b[i][j][4][4] = dt * ty2 * ( C1 * ( u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / 
u[i][j][k+1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = - dt * tz1 * dz1; c[i][j][0][1] = 0.0; c[i][j][0][2] = 0.0; c[i][j][0][3] = dt * tz2; c[i][j][0][4] = 0.0; c[i][j][1][0] = dt * tz2 * ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] ); c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2 ; c[i][j][1][2] = 0.0; c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 ); c[i][j][1][4] = 0.0; c[i][j][2][0] = dt * tz2 * ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] ); c[i][j][2][1] = 0.0; c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * ( c34 * tmp1 ) - dt * tz1 * dz3; c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 ); c[i][j][2][4] = 0.0; c[i][j][3][0] = dt * tz2 * ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 ) + 0.50 * C2 * ( ( u[i][j][k+1][1] * u[i][j][k+1][1] + u[i][j][k+1][2] * u[i][j][k+1][2] + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) ) - dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] ); c[i][j][3][1] = dt * tz2 * ( - C2 * ( u[i][j][k+1][1] * tmp1 ) ); c[i][j][3][2] = dt * tz2 * ( - C2 * ( u[i][j][k+1][2] * tmp1 ) ); c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 ) * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * ( r43 * c34 * tmp1 ) - dt * tz1 * dz4; c[i][j][3][4] = dt * tz2 * C2; c[i][j][4][0] = dt * tz2 * ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1] + u[i][j][k+1][2] * u[i][j][k+1][2] + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 - C1 * ( u[i][j][k+1][4] * tmp1 ) ) * ( u[i][j][k+1][3] * tmp1 ) ) - dt * tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) ) - ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) ) - c1345 * tmp2 * u[i][j][k+1][4] ); c[i][j][4][1] = dt * tz2 * ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1]; c[i][j][4][2] = dt * tz2 * ( - C2 * ( 
u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
	- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];
      c[i][j][4][3] = dt * tz2
	* ( C1 * ( u[i][j][k+1][4] * tmp1 )
	    - 0.50 * C2
	    * ( ( u[i][j][k+1][1]*u[i][j][k+1][1]
		  + u[i][j][k+1][2]*u[i][j][k+1][2]
		  + 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )
	- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];
      c[i][j][4][4] = dt * tz2 * ( C1 * ( u[i][j][k+1][3] * tmp1 ) )
	- dt * tz1 * c1345 * tmp1
	- dt * tz1 * dz5;
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   to compute the l2-norm of vector v.
c
c   On return sum[m] = sqrt( (sum over interior points of v[..][m]^2)
c   / number of interior points ).
c
c   NOTE(review): uses orphaned "omp single"/"omp for"/"omp critical"
c   directives, so it must be called from inside an active parallel
c   region (as ssor does); sum0..sum4 are per-thread partial sums
c   merged under the critical section.
--------------------------------------------------------------------*/
static void l2norm (int nx0, int ny0, int nz0,
		    int ist, int iend,
		    int jst, int jend,
/*--------------------------------------------------------------------
c   To improve cache performance, second two dimensions padded by 1
c   for even number sizes only.  Only needed in v.
--------------------------------------------------------------------*/
		    double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
		    double sum[5]) {

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  double sum0=0.0, sum1=0.0, sum2=0.0, sum3=0.0, sum4=0.0;

/* one thread zeroes the shared accumulator */
#pragma omp single
  for (m = 0; m < 5; m++) {
    sum[m] = 0.0;
  }

#pragma omp for nowait
  for (i = ist; i <= iend; i++) {
    for (j = jst; j <= jend; j++) {
      for (k = 1; k <= nz0-2; k++) {
	sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
	sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
	sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
	sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
	sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
      }
    }
  }

/* merge the per-thread partial sums one thread at a time */
#pragma omp critical
  {
    sum[0] += sum0;
    sum[1] += sum1;
    sum[2] += sum2;
    sum[3] += sum3;
    sum[4] += sum4;
  }
#pragma omp barrier

#pragma omp single
  for (m = 0; m < 5; m++) {
    sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   surface integral: integrates a pressure-like quantity over three
c   pairs of domain faces (frc1, frc2, frc3) and stores the combined
c   result in the global "frc".
--------------------------------------------------------------------*/
static void pintgr(void) {

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i, j, k;
  int ibeg, ifin, ifin1;
  int jbeg, jfin, jfin1;
  int iglob, iglob1, iglob2;
  int jglob, jglob1, jglob2;
  double phi1[ISIZ2+2][ISIZ3+2];	/* phi1(0:isiz2+1,0:isiz3+1) */
  double phi2[ISIZ2+2][ISIZ3+2];	/* phi2(0:isiz2+1,0:isiz3+1) */
  double frc1, frc2, frc3;

/*--------------------------------------------------------------------
c   set up the sub-domains for integeration in each processor
--------------------------------------------------------------------*/
  ibeg = nx;
  ifin = 0;
  iglob1 = -1;
  iglob2 = nx-1;
  if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
  if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
  if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
  if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
  jbeg = ny;
  jfin = -1;
  jglob1 = 0;
  jglob2 = ny-1;
  if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
  /* NOTE(review): strict ">" here where the i-direction test above uses
     ">=" -- looks asymmetric; confirm against the reference source */
  if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
  if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
  if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
  ifin1 = ifin;
  jfin1 = jfin;
  if (ifin1 == ii2) ifin1 = ifin -1;
  if (jfin1 == ji2) jfin1 = jfin -1;

/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

/* bottom (k = ki1) and top (k = ki2) faces */
  for (i = ibeg; i <= ifin; i++) {
    iglob = i;
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;

      k = ki1;
      phi1[i][j] = C2*(  u[i][j][k][4]
			 - 0.50 * (  pow2(u[i][j][k][1])
				     + pow2(u[i][j][k][2])
				     + pow2(u[i][j][k][3]) )
			 / u[i][j][k][0] );
      k = ki2;
      phi2[i][j] = C2*(  u[i][j][k][4]
			 - 0.50 * (  pow2(u[i][j][k][1])
				     + pow2(u[i][j][k][2])
				     + pow2(u[i][j][k][3]) )
			 / u[i][j][k][0] );
    }
  }

/* trapezoidal-style 2x2 cell sum over the first face pair */
  frc1 = 0.0;
  for (i = ibeg; i <= ifin1; i++) {
    for (j = jbeg; j <= jfin1; j++) {
      frc1 = frc1 + (  phi1[i][j]
		       + phi1[i+1][j]
		       + phi1[i][j+1]
		       + phi1[i+1][j+1]
		       + phi2[i][j]
		       + phi2[i+1][j]
		       + phi2[i][j+1]
		       + phi2[i+1][j+1] );
    }
  }
  frc1 = dxi * deta * frc1;

/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

/* south (j = jbeg == ji1) and north (j = jfin == ji2) faces */
  jglob = jbeg;
  if (jglob == ji1) {
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
      for (k = ki1; k <= ki2; k++) {
	phi1[i][k] = C2*(  u[i][jbeg][k][4]
			   - 0.50 * (  pow2(u[i][jbeg][k][1])
				       + pow2(u[i][jbeg][k][2])
				       + pow2(u[i][jbeg][k][3]) )
			   / u[i][jbeg][k][0] );
      }
    }
  }
  jglob = jfin;
  if (jglob == ji2) {
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
      for (k = ki1; k <= ki2; k++) {
	phi2[i][k] = C2*(  u[i][jfin][k][4]
			   - 0.50 * (  pow2(u[i][jfin][k][1])
				       + pow2(u[i][jfin][k][2])
				       + pow2(u[i][jfin][k][3]) )
			   / u[i][jfin][k][0] );
      }
    }
  }
  frc2 = 0.0;
  for (i = ibeg; i <= ifin1; i++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc2 = frc2 + (  phi1[i][k]
		       + phi1[i+1][k]
		       + phi1[i][k+1]
		       + phi1[i+1][k+1]
		       + phi2[i][k]
		       + phi2[i+1][k]
		       + phi2[i][k+1]
		       + phi2[i+1][k+1] );
    }
  }
  frc2 = dxi * dzeta * frc2;

/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

/* west (i = ibeg == ii1) and east (i = ifin == ii2) faces */
  iglob = ibeg;
  if (iglob == ii1) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
	phi1[j][k] = C2*(  u[ibeg][j][k][4]
			   - 0.50 * (  pow2(u[ibeg][j][k][1])
				       + pow2(u[ibeg][j][k][2])
				       + pow2(u[ibeg][j][k][3]) )
			   / u[ibeg][j][k][0] );
      }
    }
  }
  iglob = ifin;
  if (iglob == ii2) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
	phi2[j][k] = C2*(  u[ifin][j][k][4]
			   - 0.50 * (  pow2(u[ifin][j][k][1])
				       + pow2(u[ifin][j][k][2])
				       + pow2(u[ifin][j][k][3]) )
			   / u[ifin][j][k][0] );
      }
    }
  }
  frc3 = 0.0;
  for (j = jbeg; j <= jfin1; j++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc3 = frc3 + (  phi1[j][k]
		       + phi1[j+1][k]
		       + phi1[j][k+1]
		       + phi1[j+1][k+1]
		       + phi2[j][k]
		       + phi2[j+1][k]
		       + phi2[j][k+1]
		       + phi2[j+1][k+1] );
    }
  }
  frc3 = deta * dzeta * frc3;

/* combined surface integral */
  frc = 0.25 * ( frc1 + frc2 + frc3 );
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void read_input(void) {

  FILE *fp;

/*--------------------------------------------------------------------
c    if input file does not exist, it uses defaults
c       ipr = 1 for detailed progress output
c       inorm = how often the norm is printed (once every inorm iterations)
c       itmax = number of pseudo time steps
c       dt = time step
c       omega 1 over-relaxation factor for SSOR
c       tolrsd = steady state residual tolerance levels
c       nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
	 " - LU Benchmark\n\n");

  fp = fopen("inputlu.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputlu.data\n");
    /* NOTE(review): each fgetc loop skips to end of line but spins
       forever on a truncated file (EOF never equals '\n'), and the
       fscanf return values are unchecked -- malformed input leaves
       the globals with indeterminate values */
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%d%d", &ipr, &inorm);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%d", &itmax);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf", &dt);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf", &omega);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf%lf%lf%lf%lf",
	   &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0);
    while(fgetc(fp) != '\n');
    fclose(fp);
  } else {
    ipr = IPR_DEFAULT;
    inorm = INORM_DEFAULT;
    itmax = ITMAX_DEFAULT;
    dt = DT_DEFAULT;
    omega = OMEGA_DEFAULT;
    tolrsd[0] = TOLRSD1_DEF;
    tolrsd[1] = TOLRSD2_DEF;
    tolrsd[2] = TOLRSD3_DEF;
    tolrsd[3] = TOLRSD4_DEF;
    tolrsd[4] = TOLRSD5_DEF;
    nx0 = ISIZ1;
    ny0 = ISIZ2;
    nz0 = ISIZ3;
  }

/*--------------------------------------------------------------------
c   check problem size
--------------------------------------------------------------------*/
  if ( nx0 < 4 || ny0 < 4 || nz0 < 4 ) {
    printf(" PROBLEM SIZE IS TOO SMALL - \n"
	   " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
    exit(1);
  }
  if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
    printf(" PROBLEM SIZE IS TOO LARGE - \n"
	   " NX, NY AND NZ SHOULD BE EQUAL TO \n"
	   " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
    exit(1);
  }

  printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
  printf(" Iterations: %3d\n", itmax);
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void rhs(void) {
/*--------------------------------------------------------------------
c   compute the right
hand sides (steady-state residual rsd = spatial operator minus forcing)
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int L1, L2;
  int ist1, iend1;
  int jst1, jend1;
  double q;
  double u21, u31, u41;
  double tmp;
  double u21i, u31i, u41i, u51i;
  double u21j, u31j, u41j, u51j;
  double u21k, u31k, u41k, u51k;
  double u21im1, u31im1, u41im1, u51im1;
  double u21jm1, u31jm1, u41jm1, u51jm1;
  double u21km1, u31km1, u41km1, u51km1;

/* start from the negative of the forcing term */
#pragma omp for
  for (i = 0; i <= nx-1; i++) {
    for (j = 0; j <= ny-1; j++) {
      for (k = 0; k <= nz-1; k++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = - frct[i][j][k][m];
	}
      }
    }
  }

/*--------------------------------------------------------------------
c   xi-direction flux differences
--------------------------------------------------------------------*/
  L1 = 0;
  L2 = nx-1;

/* convective fluxes in xi */
#pragma omp for
  for (i = L1; i <= L2; i++) {
    for (j = jst; j <= jend; j++) {
      for (k = 1; k <= nz - 2; k++) {
	flux[i][j][k][0] = u[i][j][k][1];
	u21 = u[i][j][k][1] / u[i][j][k][0];
	q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
		      + u[i][j][k][2] * u[i][j][k][2]
		      + u[i][j][k][3] * u[i][j][k][3] )
	  / u[i][j][k][0];
	flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * ( u[i][j][k][4] - q );
	flux[i][j][k][2] = u[i][j][k][2] * u21;
	flux[i][j][k][3] = u[i][j][k][3] * u21;
	flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;
      }
    }
  }

#pragma omp for
  for (j = jst; j <= jend; j++) {
    for (k = 1; k <= nz - 2; k++) {
      /* central difference of the convective flux */
      for (i = ist; i <= iend; i++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
	}
      }
      /* viscous fluxes in xi (flux[..][1..4] is reused as scratch) */
      L2 = nx-1;
      for (i = ist; i <= L2; i++) {
	tmp = 1.0 / u[i][j][k][0];
	u21i = tmp * u[i][j][k][1];
	u31i = tmp * u[i][j][k][2];
	u41i = tmp * u[i][j][k][3];
	u51i = tmp * u[i][j][k][4];
	tmp = 1.0 / u[i-1][j][k][0];
	u21im1 = tmp * u[i-1][j][k][1];
	u31im1 = tmp * u[i-1][j][k][2];
	u41im1 = tmp * u[i-1][j][k][3];
	u51im1 = tmp * u[i-1][j][k][4];
	flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
	flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
	flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
	flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	  * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )
		    - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )
	  + (1.0/6.0)
	  * tx3 * ( pow2(u21i) - pow2(u21im1) )
	  + C1 * C5 * tx3 * ( u51i - u51im1 );
      }
      /* add viscous and second-difference diffusion contributions */
      for (i = ist; i <= iend; i++) {
	rsd[i][j][k][0] = rsd[i][j][k][0]
	  + dx1 * tx1 * (  u[i-1][j][k][0]
			   - 2.0 * u[i][j][k][0]
			   + u[i+1][j][k][0] );
	rsd[i][j][k][1] = rsd[i][j][k][1]
	  + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
	  + dx2 * tx1 * (  u[i-1][j][k][1]
			   - 2.0 * u[i][j][k][1]
			   + u[i+1][j][k][1] );
	rsd[i][j][k][2] = rsd[i][j][k][2]
	  + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
	  + dx3 * tx1 * (  u[i-1][j][k][2]
			   - 2.0 * u[i][j][k][2]
			   + u[i+1][j][k][2] );
	rsd[i][j][k][3] = rsd[i][j][k][3]
	  + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
	  + dx4 * tx1 * (  u[i-1][j][k][3]
			   - 2.0 * u[i][j][k][3]
			   + u[i+1][j][k][3] );
	rsd[i][j][k][4] = rsd[i][j][k][4]
	  + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
	  + dx5 * tx1 * (  u[i-1][j][k][4]
			   - 2.0 * u[i][j][k][4]
			   + u[i+1][j][k][4] );
      }

/*--------------------------------------------------------------------
c   Fourth-order dissipation (one-sided stencils at the boundaries)
--------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	rsd[1][j][k][m] = rsd[1][j][k][m]
	  - dssp * ( + 5.0 * u[1][j][k][m]
		     - 4.0 * u[2][j][k][m]
		     + u[3][j][k][m] );
	rsd[2][j][k][m] = rsd[2][j][k][m]
	  - dssp * ( - 4.0 * u[1][j][k][m]
		     + 6.0 * u[2][j][k][m]
		     - 4.0 * u[3][j][k][m]
		     + u[4][j][k][m] );
      }

      ist1 = 3;
      iend1 = nx - 4;
      for (i = ist1; i <= iend1; i++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - dssp * (  u[i-2][j][k][m]
			- 4.0 * u[i-1][j][k][m]
			+ 6.0 * u[i][j][k][m]
			- 4.0 * u[i+1][j][k][m]
			+ u[i+2][j][k][m] );
	}
      }

      for (m = 0; m < 5; m++) {
	rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]
	  - dssp * (  u[nx-5][j][k][m]
		      - 4.0 * u[nx-4][j][k][m]
		      + 6.0 * u[nx-3][j][k][m]
		      - 4.0 * u[nx-2][j][k][m] );
	rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]
	  - dssp * (  u[nx-4][j][k][m]
		      - 4.0 * u[nx-3][j][k][m]
		      + 5.0 * u[nx-2][j][k][m] );
      }
    }
  }

/*--------------------------------------------------------------------
c   eta-direction flux differences
--------------------------------------------------------------------*/
  L1 = 0;
  L2 = ny-1;

#pragma omp for
  for (i = ist; i <= iend; i++) {
    for (j = L1; j <= L2; j++) {
      for (k = 1; k <= nz - 2; k++) {
	flux[i][j][k][0] = u[i][j][k][2];
	u31 = u[i][j][k][2] / u[i][j][k][0];
	q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
		      + u[i][j][k][2] * u[i][j][k][2]
		      + u[i][j][k][3] * u[i][j][k][3] )
	  / u[i][j][k][0];
	flux[i][j][k][1] = u[i][j][k][1] * u31;
	flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);
	flux[i][j][k][3] = u[i][j][k][3] * u31;
	flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;
      }
    }
  }

#pragma omp for
  for (i = ist; i <= iend; i++) {
    for (k = 1; k <= nz - 2; k++) {
      for (j = jst; j <= jend; j++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
	}
      }
      /* viscous fluxes in eta */
      L2 = ny-1;
      for (j = jst; j <= L2; j++) {
	tmp = 1.0 / u[i][j][k][0];
	u21j = tmp * u[i][j][k][1];
	u31j = tmp * u[i][j][k][2];
	u41j = tmp * u[i][j][k][3];
	u51j = tmp * u[i][j][k][4];
	tmp = 1.0 / u[i][j-1][k][0];
	u21jm1 = tmp * u[i][j-1][k][1];
	u31jm1 = tmp * u[i][j-1][k][2];
	u41jm1 = tmp * u[i][j-1][k][3];
	u51jm1 = tmp * u[i][j-1][k][4];
	flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
	flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);
	flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
	flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	  * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )
		    - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )
	  + (1.0/6.0)
	  * ty3 * ( pow2(u31j) - pow2(u31jm1) )
	  + C1 * C5 * ty3 * ( u51j - u51jm1 );
      }
      for (j = jst; j <= jend; j++) {
	rsd[i][j][k][0] = rsd[i][j][k][0]
	  + dy1 * ty1 * (  u[i][j-1][k][0]
			   - 2.0 * u[i][j][k][0]
			   + u[i][j+1][k][0] );
	rsd[i][j][k][1] = rsd[i][j][k][1]
	  + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
	  + dy2 * ty1 * (  u[i][j-1][k][1]
			   - 2.0 * u[i][j][k][1]
			   + u[i][j+1][k][1] );
	rsd[i][j][k][2] = rsd[i][j][k][2]
	  + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
	  + dy3 * ty1 * (  u[i][j-1][k][2]
			   - 2.0 * u[i][j][k][2]
			   + u[i][j+1][k][2] );
	rsd[i][j][k][3] = rsd[i][j][k][3]
	  + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
	  + dy4 * ty1 * (  u[i][j-1][k][3]
			   - 2.0 * u[i][j][k][3]
			   + u[i][j+1][k][3] );
	rsd[i][j][k][4] = rsd[i][j][k][4]
	  + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
	  + dy5 * ty1 * (  u[i][j-1][k][4]
			   - 2.0 * u[i][j][k][4]
			   + u[i][j+1][k][4] );
      }

/*--------------------------------------------------------------------
c   fourth-order dissipation
--------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	rsd[i][1][k][m] = rsd[i][1][k][m]
	  - dssp * ( + 5.0 * u[i][1][k][m]
		     - 4.0 * u[i][2][k][m]
		     + u[i][3][k][m] );
	rsd[i][2][k][m] = rsd[i][2][k][m]
	  - dssp * ( - 4.0 * u[i][1][k][m]
		     + 6.0 * u[i][2][k][m]
		     - 4.0 * u[i][3][k][m]
		     + u[i][4][k][m] );
      }

      jst1 = 3;
      jend1 = ny - 4;
      for (j = jst1; j <= jend1; j++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - dssp * (  u[i][j-2][k][m]
			- 4.0 * u[i][j-1][k][m]
			+ 6.0 * u[i][j][k][m]
			- 4.0 * u[i][j+1][k][m]
			+ u[i][j+2][k][m] );
	}
      }

      for (m = 0; m < 5; m++) {
	rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]
	  - dssp * (  u[i][ny-5][k][m]
		      - 4.0 * u[i][ny-4][k][m]
		      + 6.0 * u[i][ny-3][k][m]
		      - 4.0 * u[i][ny-2][k][m] );
	rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]
	  - dssp * (  u[i][ny-4][k][m]
		      - 4.0 * u[i][ny-3][k][m]
		      + 5.0 * u[i][ny-2][k][m] );
      }
    }
  }

/*--------------------------------------------------------------------
c   zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
  for (i = ist; i <= iend; i++) {
    for (j = jst; j <= jend; j++) {
      for (k = 0; k <= nz-1; k++) {
	flux[i][j][k][0] = u[i][j][k][3];
	u41 = u[i][j][k][3] / u[i][j][k][0];
	q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
		      + u[i][j][k][2] * u[i][j][k][2]
		      + u[i][j][k][3] * u[i][j][k][3] )
	  / u[i][j][k][0];
	flux[i][j][k][1] = u[i][j][k][1] * u41;
	flux[i][j][k][2] = u[i][j][k][2] * u41;
	flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);
	flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;
      }

      for (k = 1; k <= nz - 2; k++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
	}
      }

      /* viscous fluxes in zeta */
      for (k = 1; k <= nz-1; k++) {
	tmp = 1.0 / u[i][j][k][0];
	u21k = tmp * u[i][j][k][1];
	u31k = tmp * u[i][j][k][2];
	u41k = tmp * u[i][j][k][3];
	u51k = tmp * u[i][j][k][4];
	tmp = 1.0 / u[i][j][k-1][0];
	u21km1 = tmp * u[i][j][k-1][1];
	u31km1 = tmp * u[i][j][k-1][2];
	u41km1 = tmp * u[i][j][k-1][3];
	u51km1 = tmp * u[i][j][k-1][4];
	flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
	flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
	flux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);
	flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	  * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )
		    - ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )
	  + (1.0/6.0)
	  * tz3 * ( pow2(u41k) - pow2(u41km1) )
	  + C1 * C5 * tz3 * ( u51k - u51km1 );
      }

      for (k = 1; k <= nz - 2; k++) {
	rsd[i][j][k][0] = rsd[i][j][k][0]
	  + dz1 * tz1 * (  u[i][j][k-1][0]
			   - 2.0 * u[i][j][k][0]
			   + u[i][j][k+1][0] );
	rsd[i][j][k][1] = rsd[i][j][k][1]
	  + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
	  + dz2 * tz1 * (  u[i][j][k-1][1]
			   - 2.0 * u[i][j][k][1]
			   + u[i][j][k+1][1] );
	rsd[i][j][k][2] = rsd[i][j][k][2]
	  + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
	  + dz3 * tz1 * (  u[i][j][k-1][2]
			   - 2.0 * u[i][j][k][2]
			   + u[i][j][k+1][2] );
	rsd[i][j][k][3] = rsd[i][j][k][3]
	  + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
	  + dz4 * tz1 * (  u[i][j][k-1][3]
			   - 2.0 * u[i][j][k][3]
			   + u[i][j][k+1][3] );
	rsd[i][j][k][4] = rsd[i][j][k][4]
	  + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
	  + dz5 * tz1 * (  u[i][j][k-1][4]
			   - 2.0 * u[i][j][k][4]
			   + u[i][j][k+1][4] );
      }

/*--------------------------------------------------------------------
c   fourth-order dissipation
--------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	rsd[i][j][1][m] = rsd[i][j][1][m]
	  - dssp * ( + 5.0 * u[i][j][1][m]
		     - 4.0 * u[i][j][2][m]
		     + u[i][j][3][m] );
	rsd[i][j][2][m] = rsd[i][j][2][m]
	  - dssp * ( - 4.0 * u[i][j][1][m]
		     + 6.0 * u[i][j][2][m]
		     - 4.0 * u[i][j][3][m]
		     + u[i][j][4][m] );
      }

      for (k = 3; k <= nz - 4; k++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - dssp * (  u[i][j][k-2][m]
			- 4.0 * u[i][j][k-1][m]
			+ 6.0 * u[i][j][k][m]
			- 4.0 * u[i][j][k+1][m]
			+ u[i][j][k+2][m] );
	}
      }

      for (m = 0; m < 5; m++) {
	rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]
	  - dssp * (  u[i][j][nz-5][m]
		      - 4.0 * u[i][j][nz-4][m]
		      + 6.0 * u[i][j][nz-3][m]
		      - 4.0 * u[i][j][nz-2][m] );
	rsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]
	  - dssp * (  u[i][j][nz-4][m]
		      - 4.0 * u[i][j][nz-3][m]
		      + 5.0 * u[i][j][nz-2][m] );
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void setbv(void) {
/*--------------------------------------------------------------------
c   set the boundary values of dependent variables
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i, j, k;
  int iglob, jglob;

/*--------------------------------------------------------------------
c   set the dependent variable values along the top and bottom faces
--------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < nx; i++) {
    iglob = i;
    for (j = 0; j < ny; j++) {
      jglob = j;
      exact( iglob, jglob, 0, &u[i][j][0][0] );
      exact( iglob, jglob, nz-1, &u[i][j][nz-1][0] );
    }
  }

/*--------------------------------------------------------------------
c   set the
dependent variable values along north and south faces
--------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < nx; i++) {
    iglob = i;
    for (k = 0; k < nz; k++) {
      exact( iglob, 0, k, &u[i][0][k][0] );
    }
  }

#pragma omp for
  for (i = 0; i < nx; i++) {
    iglob = i;
    for (k = 0; k < nz; k++) {
      exact( iglob, ny0-1, k, &u[i][ny-1][k][0] );
    }
  }

/*--------------------------------------------------------------------
c   set the dependent variable values along east and west faces
--------------------------------------------------------------------*/
#pragma omp for
  for (j = 0; j < ny; j++) {
    jglob = j;
    for (k = 0; k < nz; k++) {
      exact( 0, jglob, k, &u[0][j][k][0] );
    }
  }

#pragma omp for
  for (j = 0; j < ny; j++) {
    jglob = j;
    for (k = 0; k < nz; k++) {
      exact( nx0-1, jglob, k, &u[nx-1][j][k][0] );
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void setcoeff(void) {
/*--------------------------------------------------------------------
c   set up coefficients: grid spacings, difference-operator scalings,
c   integration limits, diffusion constants and the coefficients of
c   the exact solution (global ce table).
--------------------------------------------------------------------*/

  /* grid spacings and their derived 1/h, 1/h^2 and 1/(2h) factors */
  dxi = 1.0 / ( nx0 - 1 );
  deta = 1.0 / ( ny0 - 1 );
  dzeta = 1.0 / ( nz0 - 1 );

  tx1 = 1.0 / ( dxi * dxi );
  tx2 = 1.0 / ( 2.0 * dxi );
  tx3 = 1.0 / dxi;

  ty1 = 1.0 / ( deta * deta );
  ty2 = 1.0 / ( 2.0 * deta );
  ty3 = 1.0 / deta;

  tz1 = 1.0 / ( dzeta * dzeta );
  tz2 = 1.0 / ( 2.0 * dzeta );
  tz3 = 1.0 / dzeta;

  /* index bounds used by pintgr (NOTE(review): ji2 = ny0-3 while
     ii2 = nx0-2 -- asymmetric; confirm against the reference source) */
  ii1 = 1;
  ii2 = nx0 - 2;
  ji1 = 1;
  ji2 = ny0 - 3;
  ki1 = 2;
  ki2 = nz0 - 2;

/*--------------------------------------------------------------------
c   diffusion coefficients
--------------------------------------------------------------------*/
  dx1 = 0.75;
  dx2 = dx1;
  dx3 = dx1;
  dx4 = dx1;
  dx5 = dx1;

  dy1 = 0.75;
  dy2 = dy1;
  dy3 = dy1;
  dy4 = dy1;
  dy5 = dy1;

  dz1 = 1.00;
  dz2 = dz1;
  dz3 = dz1;
  dz4 = dz1;
  dz5 = dz1;

/*--------------------------------------------------------------------
c   fourth difference dissipation
--------------------------------------------------------------------*/
  dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the first pde
--------------------------------------------------------------------*/
  ce[0][0] = 2.0;
  ce[0][1] = 0.0;
  ce[0][2] = 0.0;
  ce[0][3] = 4.0;
  ce[0][4] = 5.0;
  ce[0][5] = 3.0;
  ce[0][6] = 5.0e-01;
  ce[0][7] = 2.0e-02;
  ce[0][8] = 1.0e-02;
  ce[0][9] = 3.0e-02;
  ce[0][10] = 5.0e-01;
  ce[0][11] = 4.0e-01;
  ce[0][12] = 3.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the second pde
--------------------------------------------------------------------*/
  ce[1][0] = 1.0;
  ce[1][1] = 0.0;
  ce[1][2] = 0.0;
  ce[1][3] = 0.0;
  ce[1][4] = 1.0;
  ce[1][5] = 2.0;
  ce[1][6] = 3.0;
  ce[1][7] = 1.0e-02;
  ce[1][8] = 3.0e-02;
  ce[1][9] = 2.0e-02;
  ce[1][10] = 4.0e-01;
  ce[1][11] = 3.0e-01;
  ce[1][12] = 5.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the third pde
--------------------------------------------------------------------*/
  ce[2][0] = 2.0;
  ce[2][1] = 2.0;
  ce[2][2] = 0.0;
  ce[2][3] = 0.0;
  ce[2][4] = 0.0;
  ce[2][5] = 2.0;
  ce[2][6] = 3.0;
  ce[2][7] = 4.0e-02;
  ce[2][8] = 3.0e-02;
  ce[2][9] = 5.0e-02;
  ce[2][10] = 3.0e-01;
  ce[2][11] = 5.0e-01;
  ce[2][12] = 4.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the fourth pde
--------------------------------------------------------------------*/
  ce[3][0] = 2.0;
  ce[3][1] = 2.0;
  ce[3][2] = 0.0;
  ce[3][3] = 0.0;
  ce[3][4] = 0.0;
  ce[3][5] = 2.0;
  ce[3][6] = 3.0;
  ce[3][7] = 3.0e-02;
  ce[3][8] = 5.0e-02;
  ce[3][9] = 4.0e-02;
  ce[3][10] = 2.0e-01;
  ce[3][11] = 1.0e-01;
  ce[3][12] = 3.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the fifth pde
--------------------------------------------------------------------*/
  ce[4][0] = 5.0;
  ce[4][1] = 4.0;
  ce[4][2] = 3.0;
  ce[4][3] = 2.0;
  ce[4][4] = 1.0e-01;
  ce[4][5] = 4.0e-01;
  ce[4][6] = 3.0e-01;
  ce[4][7] = 5.0e-02;
  ce[4][8] = 4.0e-02;
  ce[4][9] = 3.0e-02;
  ce[4][10] = 1.0e-01;
  ce[4][11] = 3.0e-01;
  ce[4][12] = 2.0e-01;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void setiv(void) {
/*--------------------------------------------------------------------
c
c   set the initial values of independent variables based on tri-linear
c   interpolation of boundary values in the computational space.
c
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int iglob, jglob;
  double xi, eta, zeta;
  double pxi, peta, pzeta;
  /* exact-solution samples on the six bounding faces */
  double ue_1jk[5],ue_nx0jk[5],ue_i1k[5],
    ue_iny0k[5],ue_ij1[5],ue_ijnz[5];

#pragma omp for
  for (j = 0; j < ny; j++) {
    jglob = j;
    for (k = 1; k < nz - 1; k++) {
      zeta = ((double)k) / (nz-1);
      /* interior points only: the faces are set by setbv */
      if (jglob != 0 && jglob != ny0-1) {
	eta = ( (double) (jglob) ) / (ny0-1);
	for (i = 0; i < nx; i++) {
	  iglob = i;
	  if(iglob != 0 && iglob != nx0-1) {
	    xi = ( (double) (iglob) ) / (nx0-1);
	    exact (0,jglob,k,ue_1jk);
	    exact (nx0-1,jglob,k,ue_nx0jk);
	    exact (iglob,0,k,ue_i1k);
	    exact (iglob,ny0-1,k,ue_iny0k);
	    exact (iglob,jglob,0,ue_ij1);
	    exact (iglob,jglob,nz-1,ue_ijnz);
	    for (m = 0; m < 5; m++) {
	      /* tri-linear blend of opposite-face values */
	      pxi =   ( 1.0 - xi ) * ue_1jk[m]
		+ xi   * ue_nx0jk[m];
	      peta =  ( 1.0 - eta ) * ue_i1k[m]
		+ eta   * ue_iny0k[m];
	      pzeta = ( 1.0 - zeta ) * ue_ij1[m]
		+ zeta   * ue_ijnz[m];

	      u[i][j][k][m] = pxi + peta + pzeta
		- pxi * peta - peta * pzeta - pzeta * pxi
		+ pxi * peta * pzeta;
	    }
	  }
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  ssor: perform pseudo-time stepping SSOR iterations for the five
c  coupled nonlinear pde s.
c
c  Reads/writes global state: rsd, u, a..d, dt, omega, itmax, inorm,
c  tolrsd, rsdnm; records wall time of the stepping loop in maxtime.
--------------------------------------------------------------------*/
static void ssor(void) {

/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int istep;
  double tmp;
  double delunm[5], tv[ISIZ1][ISIZ2][5];

/*--------------------------------------------------------------------
c  begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
  /* SSOR scaling factor 1/(omega*(2-omega)) applied to the update */
  tmp = 1.0 / ( omega * ( 2.0 - omega ) ) ;

/*--------------------------------------------------------------------
c  initialize a,b,c,d to zero (guarantees that page tables have been
c  formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/
#pragma omp parallel private(i,j,k,m)
  {
#pragma omp for
    for (i = 0; i < ISIZ1; i++) {
      for (j = 0; j < ISIZ2; j++) {
        for (k = 0; k < 5; k++) {
          for (m = 0; m < 5; m++) {
            a[i][j][k][m] = 0.0;
            b[i][j][k][m] = 0.0;
            c[i][j][k][m] = 0.0;
            d[i][j][k][m] = 0.0;
          }
        }
      }
    }

/*--------------------------------------------------------------------
c  compute the steady-state residuals
--------------------------------------------------------------------*/
    /* NOTE(review): rhs() and l2norm() are invoked inside the parallel
       region; presumably they contain their own orphaned worksharing
       directives -- confirm in their definitions. */
    rhs();

/*--------------------------------------------------------------------
c  compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
    l2norm( nx0, ny0, nz0, ist, iend, jst, jend, rsd, rsdnm );
  }

  timer_clear(1);
  timer_start(1);

/*--------------------------------------------------------------------
c  the timestep loop
--------------------------------------------------------------------*/
#pragma omp parallel private(istep,i,j,k,m)
  {
    for (istep = 1; istep <= itmax; istep++) {

      /* progress report on the first, last and every 20th step */
      if (istep%20 == 0 || istep == itmax || istep == 1) {
#pragma omp master
        printf(" Time step %4d\n", istep);
      }

/*--------------------------------------------------------------------
c  perform SSOR iteration
--------------------------------------------------------------------*/
      /* scale the residual by the timestep */
#pragma omp for
      for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
          for (k = 1; k <= nz - 2; k++) {
            for (m = 0; m < 5; m++) {
              rsd[i][j][k][m] = dt * rsd[i][j][k][m];
            }
          }
        }
      }

      /* forward sweep over k planes */
      for (k = 1; k <= nz - 2; k++) {
/*--------------------------------------------------------------------
c  form the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
        jacld(k);

/*--------------------------------------------------------------------
c  perform the lower triangular solution
--------------------------------------------------------------------*/
        blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0 );
      }

#pragma omp barrier

      /* backward sweep over k planes */
      for (k = nz - 2; k >= 1; k--) {
/*--------------------------------------------------------------------
c  form the strictly upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
        jacu(k);

/*--------------------------------------------------------------------
c  perform the upper triangular solution
--------------------------------------------------------------------*/
        buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0 );
      }

#pragma omp barrier

/*--------------------------------------------------------------------
c  update the variables
--------------------------------------------------------------------*/
#pragma omp for
      for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
          for (k = 1; k <= nz-2; k++) {
            for (m = 0; m < 5; m++) {
              u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];
            }
          }
        }
      }

/*--------------------------------------------------------------------
c  compute the max-norms of newton iteration corrections
--------------------------------------------------------------------*/
      if ( istep % inorm == 0 ) {
        l2norm( nx0, ny0, nz0, ist, iend, jst, jend, rsd, delunm );
      }

/*--------------------------------------------------------------------
c  compute the steady-state residuals
--------------------------------------------------------------------*/
      rhs();

/*--------------------------------------------------------------------
c  compute the max-norms of newton iteration residuals
--------------------------------------------------------------------*/
      if ( ( istep % inorm == 0 ) || ( istep == itmax ) ) {
        l2norm( nx0, ny0, nz0, ist, iend, jst, jend, rsd, rsdnm );
      }

/*--------------------------------------------------------------------
c  check the newton-iteration residuals against the tolerance levels
--------------------------------------------------------------------*/
      /* NOTE(review): exit(1) terminates the whole process from inside
         a parallel region once all residual norms converge -- looks
         intentional for this benchmark, but confirm before reuse. */
      if ( ( rsdnm[0] < tolrsd[0] ) && ( rsdnm[1] < tolrsd[1] ) && ( rsdnm[2] < tolrsd[2] ) && ( rsdnm[3] < tolrsd[3] ) && ( rsdnm[4] < tolrsd[4] ) ) {
        exit(1);
      }
    }
  } /* end parallel */

  timer_stop(1);
  maxtime= timer_read(1);
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  verification routine
c
c  Compares the computed norms against hard-wired references for the
c  standard problem classes:
c    xcr[5]   - RMS norms of the residual
c    xce[5]   - RMS norms of the solution error
c    xci      - surface integral value
c    cclass   - out: 'S','W','A','B','C', or 'U' when the grid/itmax
c               combination matches no known class
c    verified - out: TRUE iff every difference is within epsilon
--------------------------------------------------------------------*/
static void verify(double xcr[5], double xce[5], double xci, char *cclass, boolean *verified) {

  double xcrref[5],xceref[5],xciref, xcrdif[5],xcedif[5],xcidif, epsilon, dtref;
  int m;

/*--------------------------------------------------------------------
c  tolerance level
--------------------------------------------------------------------*/
  epsilon = 1.0e-08;

  *cclass = 'U';
  *verified = TRUE;
  /* defaults of 1.0 keep the relative-difference formulas well defined
     when the class is unknown */
  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }
  xciref = 1.0;

  if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {

    *cclass = 'S';
    dtref = 5.0e-1;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual, for the (12X12X12) grid,
c  after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xcrref[0] = 1.6196343210976702e-02;
    xcrref[1] = 2.1976745164821318e-03;
    xcrref[2] = 1.5179927653399185e-03;
    xcrref[3] = 1.5029584435994323e-03;
    xcrref[4] = 3.4264073155896461e-02;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error, for the (12X12X12) grid,
c  after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xceref[0] = 6.4223319957960924e-04;
    xceref[1] = 8.4144342047347926e-05;
    xceref[2] = 5.8588269616485186e-05;
    xceref[3] = 5.8474222595157350e-05;
    xceref[4] = 1.3103347914111294e-03;

/*--------------------------------------------------------------------
c  Reference value of surface integral, for the (12X12X12) grid,
c  after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xciref = 7.8418928865937083;

  } else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {

    *cclass = 'W'; /* SPEC95fp size */
    dtref = 1.5e-3;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual, for the (33x33x33) grid,
c  after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
    xcrref[0] = 0.1236511638192e+02;
    xcrref[1] = 0.1317228477799e+01;
    xcrref[2] = 0.2550120713095e+01;
    xcrref[3] = 0.2326187750252e+01;
    xcrref[4] = 0.2826799444189e+02;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error, for the (33X33X33) grid,
--------------------------------------------------------------------*/
    xceref[0] = 0.4867877144216;
    xceref[1] = 0.5064652880982e-01;
    xceref[2] = 0.9281818101960e-01;
    xceref[3] = 0.8570126542733e-01;
    xceref[4] = 0.1084277417792e+01;

/*--------------------------------------------------------------------
c  Reference value of surface integral, for the (33X33X33) grid,
c  after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
    xciref = 0.1161399311023e+02;

  } else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) {

    *cclass = 'A';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual, for the (64X64X64) grid,
c  after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 7.7902107606689367e+02;
    xcrref[1] = 6.3402765259692870e+01;
    xcrref[2] = 1.9499249727292479e+02;
    xcrref[3] = 1.7845301160418537e+02;
    xcrref[4] = 1.8384760349464247e+03;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error, for the (64X64X64) grid,
c  after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 2.9964085685471943e+01;
    xceref[1] = 2.8194576365003349;
    xceref[2] = 7.3473412698774742;
    xceref[3] = 6.7139225687777051;
    xceref[4] = 7.0715315688392578e+01;

/*--------------------------------------------------------------------
c  Reference value of surface integral, for the (64X64X64) grid,
c  after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 2.6030925604886277e+01;

  } else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) {

    *cclass = 'B';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual, for the (102X102X102) grid,
c  after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 3.5532672969982736e+03;
    xcrref[1] = 2.6214750795310692e+02;
    xcrref[2] = 8.8333721850952190e+02;
    xcrref[3] = 7.7812774739425265e+02;
    xcrref[4] = 7.3087969592545314e+03;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error, for the (102X102X102)
c  grid, after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 1.1401176380212709e+02;
    xceref[1] = 8.1098963655421574;
    xceref[2] = 2.8480597317698308e+01;
    xceref[3] = 2.5905394567832939e+01;
    xceref[4] = 2.6054907504857413e+02;

/*--------------------------------------------------------------------
c  Reference value of surface integral, for the (102X102X102) grid,
c  after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 4.7887162703308227e+01;

  } else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) {

    *cclass = 'C';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual, for the (162X162X162) grid,
c  after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 1.03766980323537846e+04;
    xcrref[1] = 8.92212458801008552e+02;
    xcrref[2] = 2.56238814582660871e+03;
    xcrref[3] = 2.19194343857831427e+03;
    xcrref[4] = 1.78078057261061185e+04;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error, for the (162X162X162)
c  grid, after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 2.15986399716949279e+02;
    xceref[1] = 1.55789559239863600e+01;
    xceref[2] = 5.41318863077207766e+01;
    xceref[3] = 4.82262643154045421e+01;
    xceref[4] = 4.55902910043250358e+02;

/*--------------------------------------------------------------------
c  Reference value of surface integral, for the (162X162X162) grid,
c  after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 6.66404553572181300e+01;

  } else {
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c  verification test for residuals if gridsize is either 12X12X12 or
c  64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  Compute the difference of solution values and the known reference values.
--------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }
  xcidif = fabs((xci - xciref)/xciref);

/*--------------------------------------------------------------------
c  Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
  if (*cclass != 'U') {
    printf("\n Verification being performed for cclass %1c\n", *cclass);
    printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
    /* a mismatched DT invalidates the reference values entirely */
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *cclass = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown cclass\n");
  }

  if (*cclass != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }
  for (m = 0; m < 5; m++) {
    if (*cclass == 'U') {
      printf(" %2d %20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m,xcr[m],xcrref[m],xcrdif[m]);
    } else {
      printf(" %2d %20.13e%20.13e%20.13e\n", m,xcr[m],xcrref[m],xcrdif[m]);
    }
  }

  if (*cclass != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }
  for (m = 0; m < 5; m++) {
    if (*cclass == 'U') {
      printf(" %2d %20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m,xce[m],xceref[m],xcedif[m]);
    } else {
      printf(" %2d %20.13e%20.13e%20.13e\n", m,xce[m],xceref[m],xcedif[m]);
    }
  }

  if (*cclass != 'U') {
    printf(" Comparison of surface integral\n");
  } else {
    printf(" Surface integral\n");
  }

  if (*cclass == 'U') {
    printf(" %20.13e\n", xci);
  } else if (xcidif > epsilon) {
    *verified = FALSE;
    printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
  } else {
    printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
  }

  if (*cclass == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}
utils.c
#include "utils.h" #include <string.h> void merge_scores(int * scores, char ** titles, unsigned long int size) { unsigned long int i1 = 0; unsigned long int i2 = size / 2; unsigned long int it = 0; // allocate memory for temporary buffers char ** tmp2 = (char **) malloc(size*sizeof(char *)); int * tmp3 = (int *) malloc (size*sizeof(int)); while(i1 < size/2 && i2 < size) { if (scores[i1] > scores[i2]) { tmp2[it] = titles[i1]; tmp3[it] = scores[i1]; i1++; } else { tmp2[it] = titles[i2]; tmp3[it] = scores[i2]; i2 ++; } it ++; } while (i1 < size/2) { tmp2[it] = titles[i1]; tmp3[it] = scores[i1]; i1++; it++; } while (i2 < size) { tmp2[it] = titles[i2]; tmp3[it] = scores[i2]; i2++; it++; } memcpy(titles, tmp2, size*sizeof(char *)); memcpy(scores, tmp3, size*sizeof(int)); free(tmp2); free(tmp3); } void mergesort_scores_serial(int * scores, char ** titles, unsigned long int size) { int tmp_score; char * tmp_seq; if (size == 2) { if (scores[0] <= scores[1]) { // swap scores tmp_score = scores[0]; scores[0] = scores[1]; scores[1] = tmp_score; // swap titles tmp_seq = titles[0]; titles[0] = titles[1]; titles[1] = tmp_seq; } } else { if (size > 2){ mergesort_scores_serial(scores, titles, size/2); mergesort_scores_serial(scores + size/2, titles + size/2, size - size/2); merge_scores(scores, titles, size); } } } void sort_scores (int * scores, char ** titles, unsigned long int size, int threads) { if ( threads == 1) { mergesort_scores_serial(scores, titles, size); } else if (threads > 1) { #pragma omp parallel sections num_threads(threads) { #pragma omp section sort_scores(scores, titles, size/2, threads/2); #pragma omp section sort_scores(scores + size/2, titles + size/2, size-size/2, threads-threads/2); } merge_scores(scores, titles, size); } // threads > 1 } // Wall time double dwalltime() { double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; }
cpalgorithm.h
/*
 * CPAlgorithm: abstract base class for core-periphery (CP) structure
 * detection algorithms on a Graph.  Concrete algorithms implement
 * detect() and calc_Q(); results are read back via get_c() / get_x().
 */
#include <algorithm>
#include <fstream>
#include <iostream>
#include <random>
#include <vector>
#include <map>
#include <numeric>
#include <cmath>
#include <graph.h>

#if !defined(MAX)
#define MAX(A, B) ((A) > (B) ? (A) : (B))
#endif
#if !defined(MIN)
#define MIN(A, B) ((A) < (B) ? (A) : (B))
#endif

using namespace std;

class CPAlgorithm{
public:
    // Constructor
    CPAlgorithm();
    //CPAlgorithm(int num_runs, double significance_level);

    // Getter
    vector<int> get_c () const;    // per-node CP-pair index (see _c)
    vector<double> get_x () const; // per-node coreness (see _x)
    //vector<double> get_p_values () const;

    // Setter
    //void set_significance_level (double s);
    //void set_num_rand_nets (double r);

    // Detect significant CP pairs in networks (implemented per algorithm)
    virtual void detect(const Graph& G) = 0;

    // Compute the quality of CP pairs using the stored assignment (_c, _x)
    void _calc_Q( const Graph& G, double& Q, vector<double>& q);

    // Quality of an explicit assignment (implemented per algorithm)
    virtual void calc_Q(
        const Graph& G,
        const vector<int>& c,
        const vector<double>& x,
        double& Q,
        vector<double>& q) = 0;

protected:
    vector<int> _c;    // _c[i] indicates the index of the CP pair of node i
    vector<double> _x; // x[i]=1 or x[i]=0 indicates a core or a periphery, respectively.
    //vector<double> _p_values; // p_values
    double _Q;         // quality value
    vector<double> _q; // quality values
    //double _significance_level; // statistical significance level
    //int _num_rand_nets; // number of randomised networks to be generated
    int _num_runs;     // Number of runs of the algorithm
    mt19937_64 _mtrnd; // random number generator

    /* --------------------------
    Functions needed to be implemented in each algorithm

    virtual void _detect_( //
        const Graph& G,
        vector<int>& c,
        vector<bool>& x,
        double& Q,
        vector<double>& q,
        mt19937_64& mtrnd) = 0;
    -------------------------- */
    // Detect CP structure and compute their quality

    /* --------------------------
    Statistical test

    // Initialise parameter of randomised networks generator
    virtual void _init_randomised_network_generator(const Graph& G)= 0;

    // Generate randomised networks
    virtual void _generate_randomised_network(Graph& G, mt19937_64& mtrnd) = 0;

    void _estimate_statistical_significance(
        const Graph& G,
        const vector<int>& c,
        const vector<bool>& x,
        const int num_of_rand_nets,
        vector<double>& p_values );

    double _normcdf(double value);
    -------------------------- */

    /* --------------------------
    Other utility functions
    -------------------------- */
    mt19937_64 _init_random_number_generator();
};

// Seed the per-object RNG and set the default number of runs.
CPAlgorithm::CPAlgorithm(){
    _mtrnd = _init_random_number_generator();
    //_num_rand_nets = 500;
    _num_runs = 10;
}

// Build a mt19937_64 seeded with eight words of hardware entropy.
mt19937_64 CPAlgorithm::_init_random_number_generator(){
    mt19937_64 mtrnd;
    random_device r;
    seed_seq seed{ r(), r(), r(), r(), r(), r(), r(), r() };
    mtrnd.seed(seed);
    return mtrnd;
}

// Getter
vector<int> CPAlgorithm::get_c() const{
    return _c;
}

vector<double> CPAlgorithm::get_x() const{
    return _x;
}

/* [commented-out upstream getters/setters, retained verbatim]
vector<double> CPAlgorithm::get_p_values() const{
    return _p_values;
}

void CPAlgorithm::set_significance_level (double s){
    _significance_level = s;
}

void CPAlgorithm::set_num_rand_nets (double r){
    _num_rand_nets = r;
}
*/

/* [commented-out upstream statistical-significance implementation, retained verbatim]
void CPAlgorithm::detect(const Graph& G){
    _detect_(G, _c, _x, _Q, _q, _mtrnd);

    int K = _q.size();
    vector<double> tmp(K,0.0);
    _p_values = tmp;
    if (_significance_level < 1.0) {
        _estimate_statistical_significance(G, _c, _x, _num_rand_nets, _p_values);
    }
}

void CPAlgorithm::_estimate_statistical_significance(
    const Graph& G,
    const vector<int>& c,
    const vector<bool>& x,
    const int num_of_rand_nets,
    vector<double>& p_values )
{
    // Initialise variables
    bool noSelfloop = false;
    bool isunweighted = false;
    int K = *max_element(c.begin(), c.end()) + 1;
    int N = G.get_num_nodes();
    double Q;
    vector<double> q;
    vector<int> n(K);
    vector<double> deg(N);
    fill(n.begin(), n.end(), 0);

    _calc_Q(G, c, x, Q, q);

    for (int i = 0; i < N; i++) {
        n[c[i]]++;
    };

    int numthread;// create random number generator per each thread
    _init_randomised_network_generator(G);

    # pragma omp parallel
    {
        numthread = omp_get_num_threads();
    }
    vector<mt19937_64> mtrnd_list(numthread);
    for(int i = 0; i < numthread; i++){
        mtrnd_list[i] = _init_random_number_generator();
    }

    vector<int> nhat;
    vector<double> qhat;
#ifdef _OPENMP
#pragma omp parallel for shared(nhat, qhat, mtrnd_list)
#endif
    for (int it = 0; it < num_of_rand_nets; it++) {
        // Generate a randomised network using the configuration model.
        Graph G_rand(N);
        int tid = omp_get_thread_num();
        mt19937_64 mtrnd = mtrnd_list[tid];
        _generate_randomised_network(G_rand, mtrnd);
        //_Chung_Lu_Algorithm(deg, deg_rank, G_rand, noSelfloop, isunweighted, mtrnd);

        // Detect core-periphery pairs using the KM--config algorithm
        vector<int> c_rand;
        vector<bool> x_rand;
        vector<double> q_rand;
        double Q_rand;
        _detect_(G_rand, c_rand, x_rand, Q_rand, q_rand, mtrnd);

        // Save the quality and size of core-periphery pairs in the randomised network.
        int K_rand = q_rand.size();
        vector<int> nsr(K_rand, 0);
        for (int i = 0; i < N; i++) {
            nsr[c_rand[i]]++;
        }

#ifdef _OPENMP
#pragma omp critical
#endif
        for (int k = 0; k < K_rand; k++) {
            nhat.push_back(nsr[k]);
            qhat.push_back(q_rand[k]);
        }
    }

    // Compute the mean and variance of the quality and size
    int S = nhat.size();
    double mu_n = (double)accumulate(nhat.begin(), nhat.end(), 0.0) / (double)S;
    double mu_q = (double)accumulate(qhat.begin(), qhat.end(), 0.0) / (double)S;
    double sig_nn = 0;
    double sig_qq = 0;
    double sig_nq = 0;
    for (int s = 0; s < S; s++) {
        sig_nn += pow((double)nhat[s] - mu_n, 2) / (double)(S - 1);
        sig_qq += pow(qhat[s] - mu_q, 2) / (double)(S - 1);
        sig_nq += ((double)nhat[s] - mu_n) * (qhat[s] - mu_q) / (double)(S - 1);
    }

    // Compute p-values using the Gaussian kernel density estimator
    double h = MAX(pow((double)S, -1.0 / 6.0), 1e-32);
    p_values.clear();
    p_values.assign(K, 1.0);
    for (int k = 0; k < K; k++) {
        double numer = 0.0;
        double denom = 0.0;
        for (int s = 0; s < S; s++) {
            double qbar = qhat[s] + sig_nq / sig_nn * (double)(n[k] - nhat[s]);
            double t = sig_nn * (q[k] - qbar) / (sqrt(sig_nn * sig_qq - sig_nq * sig_nq) * h);
            double cum = _normcdf(t);
            double w = exp(- (double)pow(n[k] - nhat[s], 2) / (2.0 * h * h * sig_nn)) + 1e-33;
            numer += cum * w;
            denom += w;
        }
        p_values[k] = 1.0 - numer / denom;
    }
}

double CPAlgorithm::_normcdf(double value) {
    return 0.5 + 0.5 * erf(value * M_SQRT1_2);
}
*/

// Convenience wrapper: evaluate quality of the currently stored assignment.
void CPAlgorithm::_calc_Q( const Graph& G, double& Q, vector<double>& q){
    calc_Q(G, _c, _x, Q, q);
}
DRB050-functionparameter-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdlib.h> /* Arrays passed as function parameters */ void foo1(double o1[], double c[], int len) { int i ; #pragma omp parallel for private(i ) for (i = 0; i < len; ++i) { double volnew_o8 = 0.5 * c[i]; o1[i] = volnew_o8; } } double o1[100]; double c[100]; int main() { int i; int len = 100; #pragma omp parallel for private(i ) for (i = 0; i < len; ++i) { c[i] = i + 1.01; o1[i] = i + 1.01; } foo1 (o1, c, 100); for (i = 0; i < len; ++i) { printf("%lf\n",o1[i]); } return 0; }
8391.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp parallel for simd for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp parallel for simd for (i = 0; i < _PB_N; i++) { for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp parallel for simd for (j1 = 0; j1 < _PB_M; j1++) { for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
GB_transpose.c
//------------------------------------------------------------------------------ // GB_transpose: C=A' or C=op(A'), with typecasting //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // CALLS: GB_builder // Transpose a matrix, C=A', and optionally apply a unary operator and/or // typecast the values. The transpose may be done in-place, in which case C or // A are modified in-place. // There are two ways to use this method: // C = A' C and A are different // C = C' C is transposed in-place, (C==A aliased) // In both cases, the header for C and A must already be allocated (either // static or dynamic). A is never modified, unless C==A. C and A cannot be // NULL on input. If in place (C == A) then C and A is a valid matrix on input // (the input matrix A). If C != A, the contents of C are not defined on input, // and any prior content is freed. Either header may be static or dynamic. // The input matrix A may have shallow components (even if in-place), and the // output C may also have shallow components (even if the input matrix is not // shallow). // This function is CSR/CSC agnostic; it sets the output matrix format from // C_is_csc but otherwise ignores the CSR/CSC type of A and C. // The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is // m-by-n, then at most O(e/n) threads are used. The GB_builder method is more // scalable, but not as fast with a modest number of threads. 
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"

// free the temporary index/value workspace used by the GB_builder path
#define GB_FREE_WORKSPACE \
{ \
    GB_FREE (&iwork, iwork_size) ; \
    GB_FREE (&jwork, jwork_size) ; \
    GB_FREE (&Swork, Swork_size) ; \
    GB_WERK_POP (Count, int64_t) ; \
}

// free workspace and all output/intermediate matrices (error path)
#define GB_FREE_ALL \
{ \
    GB_FREE_WORKSPACE ; \
    GB_phbix_free (T) ; \
    /* freeing C also frees A if transpose is done in-place */ \
    GB_phbix_free (C) ; \
}

//------------------------------------------------------------------------------
// GB_transpose
//------------------------------------------------------------------------------

GrB_Info GB_transpose           // C=A', C=(ctype)A' or C=op(A')
(
    GrB_Matrix C,               // output matrix C, possibly modified in-place
    GrB_Type ctype,             // desired type of C; if NULL use A->type.
                                // ignored if op is present (cast to op->ztype)
    const bool C_is_csc,        // desired CSR/CSC format of C
    const GrB_Matrix A,         // input matrix; C == A if done in place
                                // no operator is applied if op is NULL
    const GB_Operator op_in,    // unary/idxunop/binop to apply
    const GrB_Scalar scalar,    // scalar to bind to binary operator
    bool binop_bind1st,         // if true, binop(x,A) else binop(A,y)
    bool flipij,                // if true, flip i,j for user idxunop
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs and determine if transpose is done in-place
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL) ;
    ASSERT (A != NULL) ;
    bool in_place = (A == C) ;

    // T is built into a static header and transplanted into C at the end
    struct GB_Matrix_opaque T_header ;
    GrB_Matrix T = GB_clear_static_header (&T_header) ;

    GB_WERK_DECLARE (Count, int64_t) ;
    int64_t *iwork = NULL ; size_t iwork_size = 0 ;
    int64_t *jwork = NULL ; size_t jwork_size = 0 ;
    GB_void *Swork = NULL ; size_t Swork_size = 0 ;

    ASSERT_MATRIX_OK (A, "A input for GB_transpose", GB0) ;
    ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ;
    ASSERT_OP_OK_OR_NULL (op_in, "unop/binop for GB_transpose", GB0) ;
    ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_transpose", GB0) ;

    if (in_place)
    {
        GBURBLE ("(in-place transpose) ") ;
    }

    // get the current sparsity control of A (restored on C at the end)
    float A_hyper_switch = A->hyper_switch ;
    float A_bitmap_switch = A->bitmap_switch ;
    int A_sparsity_control = A->sparsity_control ;

    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;

    // wait if A has pending tuples or zombies; leave jumbled unless avdim == 1
    if (GB_PENDING (A) || GB_ZOMBIES (A) || (avdim == 1 && GB_JUMBLED (A)))
    {
        GB_OK (GB_wait (A, "A", Context)) ;
    }

    ASSERT (!GB_PENDING (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_IMPLIES (avdim == 1, !GB_JUMBLED (A))) ;

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    GrB_Type atype = A->type ;
    size_t asize = atype->size ;
    GB_Type_code acode = atype->code ;
    bool A_is_bitmap = GB_IS_BITMAP (A) ;
    bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
    int64_t anz = GB_nnz (A) ;
    int64_t anz_held = GB_nnz_held (A) ;
    int64_t anvec = A->nvec ;
    int64_t anvals = A->nvals ;

    //--------------------------------------------------------------------------
    // determine the max number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // determine the type of C and get the unary, idxunop, binary operator
    //--------------------------------------------------------------------------

    // If a unary, idxunop, or binary operator is present, C is always returned
    // as the ztype of the operator.  The input ctype is ignored.

    GB_Operator op = NULL ;
    GB_Opcode opcode = GB_NOP_code ;
    if (op_in == NULL)
    {
        // no operator
        if (ctype == NULL)
        {
            // no typecasting if ctype is NULL
            ctype = atype ;
        }
    }
    else
    {
        opcode = op_in->opcode ;
        if (GB_IS_UNARYOP_CODE (opcode))
        {
            // get the unary operator
            if (atype == op_in->xtype && opcode == GB_IDENTITY_unop_code)
            {
                // op is a built-in unary identity operator, with the same type
                // as A, so do not apply the operator and do not typecast.  op
                // is NULL.
                ctype = atype ;
            }
            else
            {
                // apply the operator, z=unop(x)
                op = op_in ;
                ctype = op->ztype ;
            }
        }
        else // binary or idxunop
        {
            // get the binary or idxunop operator: only GB_apply calls
            // GB_transpose with op_in, and it ensures this condition holds:
            // first(A,y), second(x,A) have been renamed to identity(A), and
            // PAIR has been renamed one(A), so these cases do not occur here.
            ASSERT (!((opcode == GB_PAIR_binop_code) ||
                (opcode == GB_FIRST_binop_code && !binop_bind1st) ||
                (opcode == GB_SECOND_binop_code && binop_bind1st))) ;
            // apply the operator, z=binop(A,y), binop(x,A), or idxunop(A,y)
            op = op_in ;
            ctype = op->ztype ;
        }
    }

    bool user_idxunop = (opcode == GB_USER_idxunop_code) ;

    //--------------------------------------------------------------------------
    // check for positional operators
    //--------------------------------------------------------------------------

    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    // save_op remembers the original operator; positional/user idxunop ops are
    // applied after the transpose (see below), not during it
    GB_Operator save_op = op ;
    if (op_is_positional)
    {
        // do not apply the positional op until after the transpose;
        // replace op with the ONE operator, as a placeholder.  C will be
        // constructed as iso, and needs to be expanded to non-iso when done.
        ASSERT (ctype == GrB_INT64 || ctype == GrB_INT32 || ctype == GrB_BOOL) ;
        op = (GB_Operator) GB_unop_one (ctype->code) ;
    }
    else if (user_idxunop)
    {
        // do not apply the user op until after the transpose; replace with
        // no operator at all, with no typecast
        op = NULL ;
        ctype = atype ;
    }

    //--------------------------------------------------------------------------
    // determine the iso status of C
    //--------------------------------------------------------------------------

    size_t csize = ctype->size ;
    ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
    GB_iso_code C_code_iso = GB_iso_unop_code (A, op, binop_bind1st) ;
    bool C_iso = (C_code_iso != GB_NON_ISO) ;
    ASSERT (GB_IMPLIES (A->iso, C_iso)) ;
    if (C_iso && !op_is_positional)
    {
        GBURBLE ("(iso transpose) ") ;
    }
    else
    {
        GBURBLE ("(transpose) ") ;
    }

    //==========================================================================
    // T = A', T = (ctype) A', or T = op (A')
    //==========================================================================

    if (anz == 0)
    {

        //----------------------------------------------------------------------
        // A is empty
        //----------------------------------------------------------------------

        // create a new empty matrix T, with the new type and dimensions.
        // set T->iso = false   OK
        GB_OK (GB_new_bix (&T, true,    // hyper, static header
            ctype, avdim, avlen, GB_Ap_calloc, C_is_csc, GxB_HYPERSPARSE,
            true, A_hyper_switch, 1, 1, true, false, Context)) ;

    }
    else if (A_is_bitmap || GB_as_if_full (A))
    {

        //----------------------------------------------------------------------
        // transpose a bitmap/as-if-full matrix or vector
        //----------------------------------------------------------------------

        // A is either bitmap or as-if-full (full, or sparse or hypersparse
        // with all entries present, no zombies, no pending tuples, and not
        // jumbled).  T = A' is either bitmap or full.

        int T_sparsity = (A_is_bitmap) ? GxB_BITMAP : GxB_FULL ;
        bool T_cheap =                  // T can be done quickly if:
            (avlen == 1 || avdim == 1)  // A is a row or column vector,
            && op == NULL               // no operator to apply,
            && atype == ctype ;         // and no typecasting

        // allocate T
        if (T_cheap)
        {
            // just initialize the static header of T, not T->b or T->x
            info = GB_new (&T, true,    // bitmap or full, static header
                ctype, avdim, avlen, GB_Ap_null, C_is_csc,
                T_sparsity, A_hyper_switch, 1, Context) ;
            ASSERT (info == GrB_SUCCESS) ;
        }
        else
        {
            // allocate all of T, including T->b and T->x
            // set T->iso = C_iso   OK
            GB_OK (GB_new_bix (&T, true,    // bitmap or full, static header
                ctype, avdim, avlen, GB_Ap_null, C_is_csc, T_sparsity, true,
                A_hyper_switch, 1, anz_held, true, C_iso, Context)) ;
        }

        T->magic = GB_MAGIC ;
        if (T_sparsity == GxB_BITMAP)
        {
            T->nvals = anvals ;     // for bitmap case only
        }

        //----------------------------------------------------------------------
        // T = A'
        //----------------------------------------------------------------------

        int nthreads = GB_nthreads (anz_held + anvec, chunk, nthreads_max) ;

        if (T_cheap)
        {
            // no work to do.  Transposing does not change A->b or A->x
            T->b = A->b ; T->b_size = A->b_size ;
            T->x = A->x ; T->x_size = A->x_size ;
            if (in_place)
            {
                // transplant A->b and A->x into T
                T->b_shallow = A->b_shallow ;
                T->x_shallow = A->x_shallow ;
                A->b = NULL ;
                A->x = NULL ;
            }
            else
            {
                // T is a purely shallow copy of A
                T->b_shallow = (A->b != NULL) ;
                T->x_shallow = true ;
            }
            T->iso = A->iso ;   // OK
        }
        else if (op == NULL)
        {
            // do not apply an operator; optional typecast to T->type
            GB_transpose_ix (T, A, NULL, NULL, 0, nthreads) ;
        }
        else
        {
            // apply an operator, T has type op->ztype
            GB_transpose_op (T, C_code_iso, op, scalar, binop_bind1st, A,
                NULL, NULL, 0, nthreads) ;
        }

        ASSERT_MATRIX_OK (T, "T dense/bitmap", GB0) ;
        ASSERT (!GB_JUMBLED (T)) ;

    }
    else if (avdim == 1)
    {

        //----------------------------------------------------------------------
        // transpose a "column" vector into a "row"
        //----------------------------------------------------------------------

        // transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
        // A must be sorted first.
        ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ;
        ASSERT (!GB_JUMBLED (A)) ;

        //----------------------------------------------------------------------
        // allocate T
        //----------------------------------------------------------------------

        // Initialize the header of T, with no content, and initialize the
        // type and dimension of T.  T is hypersparse.
        info = GB_new (&T, true,    // hyper; static header
            ctype, 1, avlen, GB_Ap_null, C_is_csc,
            GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
        ASSERT (info == GrB_SUCCESS) ;

        // allocate T->p, T->i, and optionally T->x, but not T->h
        T->p = GB_MALLOC (anz+1, int64_t, &(T->p_size)) ;
        T->i = GB_MALLOC (anz , int64_t, &(T->i_size)) ;
        bool allocate_Tx = (op != NULL || C_iso) || (ctype != atype) ;
        if (allocate_Tx)
        {
            // allocate new space for the new typecasted numerical values of T
            T->x = GB_XALLOC (C_iso, anz, csize, &(T->x_size)) ;
        }

        if (T->p == NULL || T->i == NULL || (allocate_Tx && T->x == NULL))
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // numerical values of T: apply the op, typecast, or make shallow copy
        //----------------------------------------------------------------------

        // numerical values: apply the operator, typecast, or make shallow copy
        if (op != NULL || C_iso)
        {
            // T->x = unop (A), binop (A,scalar), or binop (scalar,A), or
            // compute the iso value of T = 1, A, or scalar, without any op
            info = GB_apply_op ((GB_void *) T->x, ctype, C_code_iso, op,
                scalar, binop_bind1st, flipij, A, Context) ;
            ASSERT (info == GrB_SUCCESS) ;
        }
        else if (ctype != atype)
        {
            // copy the values from A into T and cast from atype to ctype
            GB_cast_matrix (T, A, Context) ;
        }
        else
        {
            // no type change; numerical values of T are a shallow copy of A.
            ASSERT (!allocate_Tx) ;
            T->x = A->x ; T->x_size = A->x_size ;
            if (in_place)
            {
                // transplant A->x as T->x
                T->x_shallow = A->x_shallow ;
                A->x = NULL ;
            }
            else
            {
                // T->x is a shallow copy of A->x
                T->x_shallow = true ;
            }
        }

        // each entry in A becomes a non-empty vector in T;
        // T is a hypersparse 1-by-avlen matrix

        // transplant or shallow-copy A->i as the new T->h
        T->h = A->i ; T->h_size = A->i_size ;
        if (in_place)
        {
            // transplant A->i as T->h
            T->h_shallow = A->i_shallow ;
            A->i = NULL ;
        }
        else
        {
            // T->h is a shallow copy of A->i
            T->h_shallow = true ;
        }

        // T->p = 0:anz and T->i = zeros (1,anz), newly allocated
        T->plen = anz ;
        T->nvec = anz ;
        T->nvec_nonempty = anz ;

        // fill the vector pointers T->p
        int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            T->i [k] = 0 ;
            T->p [k] = k ;
        }
        T->p [anz] = anz ;

        T->iso = C_iso ;
        T->magic = GB_MAGIC ;

    }
    else if (avlen == 1)
    {

        //----------------------------------------------------------------------
        // transpose a "row" into a "column" vector
        //----------------------------------------------------------------------

        // transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
        // if A->vlen is 1, all vectors of A are implicitly sorted
        ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ;

        //----------------------------------------------------------------------
        // allocate workspace, if needed
        //----------------------------------------------------------------------

        int ntasks = 0 ;
        int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
        if (nth > 1 && !A_is_hyper)
        {
            // ntasks and Count are not needed if nth == 1
            ntasks = 8 * nth ;
            ntasks = GB_IMIN (ntasks, avdim) ;
            ntasks = GB_IMAX (ntasks, 1) ;
            GB_WERK_PUSH (Count, ntasks+1, int64_t) ;
            if (Count == NULL)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }
        }

        // Allocate the header of T, with no content
        // and initialize the type and dimension of T.
        info = GB_new (&T, true,    // sparse; static header
            ctype, avdim, 1, GB_Ap_null, C_is_csc,
            GxB_SPARSE, A_hyper_switch, 0, Context) ;
        ASSERT (info == GrB_SUCCESS) ;
        T->iso = C_iso ;    // OK

        // allocate new space for the values and pattern
        T->p = GB_CALLOC (2, int64_t, &(T->p_size)) ;
        if (!A_is_hyper)
        {
            // A is sparse, so new space is needed for T->i
            T->i = GB_MALLOC (anz, int64_t, &(T->i_size)) ;
        }
        bool allocate_Tx = (op != NULL || C_iso) || (ctype != atype) ;
        if (allocate_Tx)
        {
            // allocate new space for the new typecasted numerical values of T
            T->x = GB_XALLOC (C_iso, anz, csize, &(T->x_size)) ;
        }
        if (T->p == NULL || (T->i == NULL && !A_is_hyper) ||
            (T->x == NULL && allocate_Tx))
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // numerical values of T: apply the op, typecast, or make shallow copy
        //----------------------------------------------------------------------

        // numerical values: apply the operator, typecast, or make shallow copy
        if (op != NULL || C_iso)
        {
            // T->x = unop (A), binop (A,scalar), or binop (scalar,A), or
            // compute the iso value of T = 1, A, or scalar, without any op
            info = GB_apply_op ((GB_void *) T->x, ctype, C_code_iso, op,
                scalar, binop_bind1st, flipij, A, Context) ;
            ASSERT (info == GrB_SUCCESS) ;
        }
        else if (ctype != atype)
        {
            // copy the values from A into T and cast from atype to ctype
            GB_cast_matrix (T, A, Context) ;
        }
        else
        {
            // no type change; numerical values of T are a shallow copy of A.
            ASSERT (!allocate_Tx) ;
            T->x = A->x ; T->x_size = A->x_size ;
            if (in_place)
            {
                // transplant A->x as T->x
                T->x_shallow = A->x_shallow ;
                A->x = NULL ;
            }
            else
            {
                // T->x is a shallow copy of A->x
                T->x_shallow = true ;
            }
        }

        //----------------------------------------------------------------------
        // compute T->i
        //----------------------------------------------------------------------

        if (A_is_hyper)
        {

            //------------------------------------------------------------------
            // each non-empty vector in A becomes an entry in T
            //------------------------------------------------------------------

            T->i = A->h ; T->i_size = A->h_size ;
            if (in_place)
            {
                // transplant A->h as T->i
                T->i_shallow = A->h_shallow ;
                A->h = NULL ;
            }
            else
            {
                // T->i is a shallow copy of A->h
                T->i_shallow = true ;
            }

        }
        else
        {

            //------------------------------------------------------------------
            // find the non-empty vectors of A, which become entries in T
            //------------------------------------------------------------------

            if (nth == 1)
            {

                //--------------------------------------------------------------
                // construct T->i with a single thread
                //--------------------------------------------------------------

                int64_t k = 0 ;
                for (int64_t j = 0 ; j < avdim ; j++)
                {
                    if (A->p [j] < A->p [j+1])
                    {
                        T->i [k++] = j ;
                    }
                }
                ASSERT (k == anz) ;

            }
            else
            {

                //--------------------------------------------------------------
                // construct T->i in parallel
                //--------------------------------------------------------------

                // first pass: count the non-empty vectors in each task's range
                int tid ;
                #pragma omp parallel for num_threads(nth) schedule(dynamic,1)
                for (tid = 0 ; tid < ntasks ; tid++)
                {
                    int64_t jstart, jend, k = 0 ;
                    GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
                    for (int64_t j = jstart ; j < jend ; j++)
                    {
                        if (A->p [j] < A->p [j+1])
                        {
                            k++ ;
                        }
                    }
                    Count [tid] = k ;
                }

                GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
                ASSERT (Count [ntasks] == anz) ;

                // second pass: fill T->i, each task starting at its offset
                #pragma omp parallel for num_threads(nth) schedule(dynamic,1)
                for (tid = 0 ; tid < ntasks ; tid++)
                {
                    int64_t jstart, jend, k = Count [tid] ;
                    GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
                    for (int64_t j = jstart ; j < jend ; j++)
                    {
                        if (A->p [j] < A->p [j+1])
                        {
                            T->i [k++] = j ;
                        }
                    }
                }
            }

            #ifdef GB_DEBUG
            int64_t k = 0 ;
            for (int64_t j = 0 ; j < avdim ; j++)
            {
                if (A->p [j] < A->p [j+1])
                {
                    ASSERT (T->i [k] == j) ;
                    k++ ;
                }
            }
            ASSERT (k == anz) ;
            #endif
        }

        //---------------------------------------------------------------------
        // vector pointers of T
        //---------------------------------------------------------------------

        // T->p = [0 anz]
        ASSERT (T->plen == 1) ;
        ASSERT (T->nvec == 1) ;
        T->nvec_nonempty = (anz == 0) ? 0 : 1 ;
        T->p [1] = anz ;
        T->magic = GB_MAGIC ;
        ASSERT (!GB_JUMBLED (T)) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // transpose a general sparse or hypersparse matrix
        //----------------------------------------------------------------------

        ASSERT_MATRIX_OK (A, "A for GB_transpose", GB0) ;

        // T=A' with optional typecasting, or T=op(A')

        //----------------------------------------------------------------------
        // select the method
        //----------------------------------------------------------------------

        int nworkspaces_bucket, nthreads_bucket ;
        bool use_builder = GB_transpose_method (A,
            &nworkspaces_bucket, &nthreads_bucket, Context) ;

        //----------------------------------------------------------------------
        // transpose the matrix with the selected method
        //----------------------------------------------------------------------

        if (use_builder)
        {

            //------------------------------------------------------------------
            // transpose via GB_builder
            //------------------------------------------------------------------

            //------------------------------------------------------------------
            // allocate and create iwork
            //------------------------------------------------------------------

            // allocate iwork of size anz
            iwork = GB_MALLOC (anz, int64_t, &iwork_size) ;
            if (iwork == NULL)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }

            // Construct the "row" indices of C, which are "column" indices of
            // A.  This array becomes the permanent T->i on output.
            GB_OK (GB_extract_vector_list (iwork, A, Context)) ;

            //------------------------------------------------------------------
            // allocate the output matrix and additional space (jwork and Swork)
            //------------------------------------------------------------------

            // initialize the header of T, with no content,
            // and initialize the type and dimension of T.
            info = GB_new (&T, true,    // hyper, static header
                ctype, avdim, avlen, GB_Ap_null, C_is_csc,
                GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
            ASSERT (info == GrB_SUCCESS) ;

            // if in_place, the prior A->p and A->h can now be freed
            if (in_place)
            {
                if (!A->p_shallow) GB_FREE (&A->p, A->p_size) ;
                if (!A->h_shallow) GB_FREE (&A->h, A->h_size) ;
            }

            GB_void *S_input = NULL ;

            // for the GB_builder method, if the transpose is done in-place and
            // A->i is not shallow, A->i can be used and then freed.
            // Otherwise, A->i is not modified at all.
            bool ok = true ;
            bool recycle_Ai = (in_place && !A->i_shallow) ;
            if (!recycle_Ai)
            {
                // allocate jwork of size anz
                jwork = GB_MALLOC (anz, int64_t, &jwork_size) ;
                ok = ok && (jwork != NULL) ;
            }
            if (op != NULL && !C_iso)
            {
                Swork = (GB_void *) GB_XALLOC (C_iso, anz, csize, &Swork_size) ;
                ok = ok && (Swork != NULL) ;
            }
            if (!ok)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }

            //------------------------------------------------------------------
            // construct jwork and Swork
            //------------------------------------------------------------------

            // "row" indices of A become "column" indices of C
            if (recycle_Ai)
            {
                // A->i is used as workspace for the "column" indices of C.
                // jwork is A->i, and is freed by GB_builder.
                jwork = A->i ; jwork_size = A->i_size ;
                A->i = NULL ;
                ASSERT (in_place) ;
            }
            else
            {
                // copy A->i into jwork, making a deep copy.  jwork is freed by
                // GB_builder.  A->i is not modified, even if out of memory.
                GB_memcpy (jwork, A->i, anz * sizeof (int64_t), nthreads_max) ;
            }

            // numerical values: apply the op, typecast, or make shallow copy
            GrB_Type stype ;
            GB_void sscalar [GB_VLA(csize)] ;
            if (C_iso)
            {
                // apply the op to the iso scalar
                GB_iso_unop (sscalar, ctype, C_code_iso, op, A, scalar) ;
                S_input = sscalar ;     // S_input is used instead of Swork
                Swork = NULL ;
                stype = ctype ;
            }
            else if (op != NULL)
            {
                // Swork = op (A)
                info = GB_apply_op (Swork, ctype, C_code_iso, op, scalar,
                    binop_bind1st, flipij, A, Context) ;
                ASSERT (info == GrB_SUCCESS) ;
                // GB_builder will not need to typecast Swork to T->x, and it
                // may choose to transplant it into T->x
                S_input = NULL ;        // Swork is used instead of S_input
                stype = ctype ;
            }
            else
            {
                // GB_builder will typecast S_input from atype to ctype if
                // needed.  S_input is a shallow copy of Ax, and must not be
                // modified.
                ASSERT (!C_iso) ;
                ASSERT (!A->iso) ;
                S_input = (GB_void *) A->x ; // S_input is used instead of Swork
                Swork = NULL ;
                stype = atype ;
            }

            //------------------------------------------------------------------
            // build the matrix: T = (ctype) A' or op ((xtype) A')
            //------------------------------------------------------------------

            // internally, jwork is freed and then T->x is allocated, so the
            // total memory usage is anz * max (csize, sizeof(int64_t)).  T is
            // always hypersparse.  Either T, Swork, and S_input are all iso,
            // or all non-iso, depending on C_iso.

            GB_OK (GB_builder (
                T,          // create T using a static header
                ctype,      // T is of type ctype
                avdim,      // T->vlen = A->vdim, always > 1
                avlen,      // T->vdim = A->vlen, always > 1
                C_is_csc,   // T has the same CSR/CSC format as C
                &iwork,     // iwork_handle, becomes T->i on output
                &iwork_size,
                &jwork,     // jwork_handle, freed on output
                &jwork_size,
                &Swork,     // Swork_handle, freed on output
                &Swork_size,
                false,      // tuples are not sorted on input
                true,       // tuples have no duplicates
                anz,        // size of iwork, jwork, and Swork
                true,       // is_matrix: unused
                NULL, NULL, // original I,J indices: not used here
                S_input,    // array of values of type stype, not modified
                C_iso,      // iso property of T is the same as C->iso
                anz,        // number of tuples
                NULL,       // no dup operator needed (input has no duplicates)
                stype,      // type of S_input or Swork
                Context
            )) ;

            // GB_builder always frees jwork, and either frees iwork or
            // transplants it in to T->i and sets iwork to NULL.  So iwork and
            // jwork are always NULL on output.  GB_builder does not modify
            // S_input.
            ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
            ASSERT (!GB_JUMBLED (T)) ;

        }
        else
        {

            //------------------------------------------------------------------
            // transpose via bucket sort
            //------------------------------------------------------------------

            // T = A' and typecast to ctype
            GB_OK (GB_transpose_bucket (T, C_code_iso, ctype, C_is_csc, A,
                op, scalar, binop_bind1st,
                nworkspaces_bucket, nthreads_bucket, Context)) ;
            ASSERT_MATRIX_OK (T, "T from bucket", GB0) ;
            ASSERT (GB_JUMBLED_OK (T)) ;
        }
    }

    //==========================================================================
    // free workspace, apply positional op, and transplant/conform T into C
    //==========================================================================

    //--------------------------------------------------------------------------
    // free workspace
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    if (in_place)
    {
        // free prior space of A, if transpose is done in-place
        GB_phbix_free (A) ;
    }

    //--------------------------------------------------------------------------
    // transplant T into the result C
    //--------------------------------------------------------------------------

    // transplant the control settings from A to C
    C->hyper_switch = A_hyper_switch ;
    C->bitmap_switch = A_bitmap_switch ;
    C->sparsity_control = A_sparsity_control ;
    GB_OK (GB_transplant (C, ctype, &T, Context)) ;
    ASSERT_MATRIX_OK (C, "C transplanted in GB_transpose", GB0) ;
    ASSERT_TYPE_OK (ctype, "C type in GB_transpose", GB0) ;

    //--------------------------------------------------------------------------
    // apply a positional operator or user idxunop after transposing the matrix
    //--------------------------------------------------------------------------

    op = save_op ;
    if (op_is_positional)
    {
        if (C->iso)
        {
            // If C was constructed as iso; it needs to be expanded first,
            // but do not initialize the values.  These are computed by
            // GB_apply_op below.
            // set C->iso = false   OK: no need to burble
            GB_OK (GB_convert_any_to_non_iso (C, false, Context)) ;
        }
        // the positional unary op is applied in-place: C->x = op (C)
        GB_OK (GB_apply_op ((GB_void *) C->x, ctype, GB_NON_ISO, op,
            scalar, binop_bind1st, flipij, C, Context)) ;
    }
    else if (user_idxunop)
    {
        if (C->iso)
        {
            // If C was constructed as iso; it needs to be expanded and
            // initialized first.
            GB_OK (GB_convert_any_to_non_iso (C, true, Context)) ;
        }
        if (C->type == op->ztype)
        {
            // the user-defined index unary op is applied in-place: C->x = op
            // (C) where the type of C does not change
            GB_OK (GB_apply_op ((GB_void *) C->x, ctype, GB_NON_ISO, op,
                scalar, binop_bind1st, flipij, C, Context)) ;
        }
        else // op is a user-defined index unary operator
        {
            // apply the operator to the transposed matrix:
            // C = op (C), but not in-place since the type of C is changing
            ctype = op->ztype ;
            csize = ctype->size ;
            size_t Cx_size = 0 ;
            GB_void *Cx_final = GB_MALLOC (anz_held*csize, GB_void, &Cx_size) ;
            if (Cx_final == NULL)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }
            // Cx_final = op (C)
            GB_OK (GB_apply_op (Cx_final, ctype, GB_NON_ISO, op, scalar,
                false, flipij, C, Context)) ;
            // transplant Cx_final as C->x and finalize the type of C
            GB_FREE (&(C->x), C->x_size) ;
            C->x = Cx_final ;
            C->x_size = Cx_size ;
            C->type = ctype ;
            C->iso = false ;
        }
    }

    //--------------------------------------------------------------------------
    // conform the result to the desired sparsity structure of A
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ;
    GB_OK (GB_conform (C, Context)) ;
    ASSERT_MATRIX_OK (C, "C output of GB_transpose", GB0) ;
    return (GrB_SUCCESS) ;
}
/* test_core.c */
/* * RELIC is an Efficient LIbrary for Cryptography * Copyright (C) 2007-2020 RELIC Authors * * This file is part of RELIC. RELIC is legal property of its developers, * whose names are not listed here. Please refer to the COPYRIGHT file * for contact information. * * RELIC is free software; you can redistribute it and/or modify it under the * terms of the version 2.1 (or later) of the GNU Lesser General Public License * as published by the Free Software Foundation; or version 2.0 of the Apache * License as published by the Apache Software Foundation. See the LICENSE files * for more details. * * RELIC is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the LICENSE files for more details. * * You should have received a copy of the GNU Lesser General Public or the * Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/> * or <https://www.apache.org/licenses/>. */ /** * @file * * Tests for configuration management. * * @ingroup test */ #include <stdio.h> #include "relic.h" #include "relic_test.h" #if MULTI == PTHREAD void *master(void *ptr) { int *code = (int *)ptr; core_init(); RLC_THROW(ERR_NO_MEMORY); if (err_get_code() != RLC_ERR) { *code = RLC_ERR; } else { *code = RLC_OK; } core_clean(); return NULL; } void *tester(void *ptr) { int *code = (int *)ptr; core_init(); if (err_get_code() != RLC_OK) { *code = RLC_ERR; } else { *code = RLC_OK; } core_clean(); return NULL; } #endif int main(void) { int code = RLC_ERR; /* Initialize library with default configuration. */ if (core_init() != RLC_OK) { core_clean(); return 1; } util_banner("Tests for the CORE module:\n", 0); TEST_ONCE("the library context is consistent") { TEST_ASSERT(core_get() != NULL, end); } TEST_END; TEST_ONCE("switching the library context is correct") { ctx_t new_ctx, *old_ctx; /* Backup the old context. 
*/ old_ctx = core_get(); /* Switch the library context. */ core_set(&new_ctx); /* Reinitialize library with new context. */ core_init(); /* Run function to manipulate the library context. */ RLC_THROW(ERR_NO_MEMORY); core_set(old_ctx); TEST_ASSERT(err_get_code() == RLC_OK, end); core_set(&new_ctx); TEST_ASSERT(err_get_code() == RLC_ERR, end); /* Now we need to finalize the new context. */ core_clean(); /* And restore the original context. */ core_set(old_ctx); } TEST_END; code = RLC_OK; #if MULTI == OPENMP TEST_ONCE("library context is thread-safe") { omp_set_num_threads(CORES); #pragma omp parallel shared(code) { if (omp_get_thread_num() == 0) { RLC_THROW(ERR_NO_MEMORY); if (err_get_code() != RLC_ERR) { code = RLC_ERR; } } else { core_init(); if (err_get_code() != RLC_OK) { code = RLC_ERR; } core_clean(); } } TEST_ASSERT(code == RLC_OK, end); core_init(); #pragma omp parallel copyin(core_ctx) shared(code) { if (core_get() == NULL) { code = RLC_ERR; } } TEST_ASSERT(code == RLC_OK, end); core_clean(); } TEST_END; #endif #if MULTI == PTHREAD TEST_ONCE("library context is thread-safe") { pthread_t thread[CORES]; int result[CORES] = { RLC_OK }; for (int i = 0; i < CORES; i++) { if (i == 0) { if (pthread_create(&(thread[0]), NULL, master, &(result[0]))) { code = RLC_ERR; } } else { if (pthread_create(&(thread[i]), NULL, tester, &(result[i]))) { code = RLC_ERR; } } if (result[i] != RLC_OK) { code = RLC_ERR; } } for (int i = 0; i < CORES; i++) { if (pthread_join(thread[i], NULL)) { code = RLC_ERR; } } TEST_ASSERT(code == RLC_OK, end); } TEST_END; #endif util_banner("All tests have passed.\n", 0); end: core_clean(); return code; }
/* GB_binop__pair_fp64.c */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_fp64)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_fp64)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = 1

// The PAIR operator ignores both inputs: z = 1 always.  That is why GB_GETA
// and GB_GETB below expand to empty statements — the templates never need
// the values of A or B, only their patterns.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this return is unreachable (the block above always
    // returns); a harmless generator artifact — fix belongs in Generator/*.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // PAIR ignores both operands: the two empty statements are the
        // expansions of GB_GETA/GB_GETB above.
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = 1 ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = 1 ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
par_multi_interp.c
/*BHEADER**********************************************************************
 * Copyright (c) 2017, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
 * This file is part of AMG. See files README and COPYRIGHT for details.
 *
 * AMG is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * This software is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the terms and conditions of the
 * GNU General Public License for more details.
 *
 ***********************************************************************EHEADER*/



#include "_hypre_parcsr_ls.h"

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildMultipass
 * This routine implements Stuben's direct interpolation with multiple passes. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int P_max_elmts, HYPRE_Int weight_option, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = NULL; HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = NULL; HYPRE_Int *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); HYPRE_Int *col_map_offd = NULL; HYPRE_Int num_cols_offd; hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_diag_j; hypre_CSRMatrix *P_offd; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row, finally will be pointer to 
start of row */ HYPRE_Int *P_offd_j = NULL; HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *send_map_start; HYPRE_Int *send_map_elmt; HYPRE_Int *send_procs; HYPRE_Int num_recvs = 0; HYPRE_Int *recv_vec_start; HYPRE_Int *recv_procs; HYPRE_Int *new_recv_vec_start = NULL; HYPRE_Int **Pext_send_map_start = NULL; HYPRE_Int **Pext_recv_vec_start = NULL; HYPRE_Int *Pext_start = NULL; HYPRE_Int *P_ncols = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; HYPRE_Int *P_marker; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *C_array; HYPRE_Int *C_array_offd = NULL; HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */ HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first point of pass j contained in pass_array */ HYPRE_Int *P_diag_start; HYPRE_Int *P_offd_start = NULL; HYPRE_Int **P_diag_pass; HYPRE_Int **P_offd_pass = NULL; HYPRE_Int **Pext_pass = NULL; HYPRE_Int **new_elmts = NULL; /* new neighbors generated in each pass */ HYPRE_Int *new_counter = NULL; /* contains no. 
of new neighbors for each pass */ HYPRE_Int *loc = NULL; /* contains locations for new neighbor connections in int_o_buffer to avoid searching */ HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero cols of off proc neighbors */ HYPRE_Int *Pext_send_buffer = NULL; /* used to collect global nonzero col ids in P_diag for send_map_elmts */ HYPRE_Int *map_S_to_new = NULL; /*HYPRE_Int *map_A_to_new = NULL;*/ HYPRE_Int *map_A_to_S = NULL; HYPRE_Int *new_col_map_offd = NULL; HYPRE_Int *col_map_offd_P = NULL; HYPRE_Int *permute = NULL; HYPRE_Int cnt; HYPRE_Int cnt_nz; HYPRE_Int total_nz; HYPRE_Int pass; HYPRE_Int num_passes; HYPRE_Int max_num_passes = 10; HYPRE_Int n_fine; HYPRE_Int n_coarse = 0; HYPRE_Int n_coarse_offd = 0; HYPRE_Int n_SF = 0; HYPRE_Int n_SF_offd = 0; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *assigned = NULL; HYPRE_Int *assigned_offd = NULL; HYPRE_Real *Pext_send_data = NULL; HYPRE_Real *Pext_data = NULL; HYPRE_Real sum_C, sum_N; HYPRE_Real sum_C_pos, sum_C_neg; HYPRE_Real sum_N_pos, sum_N_neg; HYPRE_Real diagonal; HYPRE_Real alfa = 1.0; HYPRE_Real beta = 1.0; HYPRE_Int j_start; HYPRE_Int j_end; HYPRE_Int i,i1; HYPRE_Int j,j1; HYPRE_Int k,k1,k2,k3; HYPRE_Int pass_array_size; HYPRE_Int global_pass_array_size; HYPRE_Int local_pass_array_size; HYPRE_Int my_id, num_procs; HYPRE_Int index, start; HYPRE_Int my_first_cpt; HYPRE_Int total_global_cpts; HYPRE_Int p_cnt; HYPRE_Int total_nz_offd; HYPRE_Int cnt_nz_offd; HYPRE_Int cnt_offd, cnt_new; HYPRE_Int no_break; HYPRE_Int not_found; HYPRE_Int Pext_send_size; HYPRE_Int Pext_recv_size; HYPRE_Int old_Pext_send_size; HYPRE_Int old_Pext_recv_size; HYPRE_Int P_offd_size = 0; HYPRE_Int local_index = -1; HYPRE_Int new_num_cols_offd = 0; HYPRE_Int num_cols_offd_P; /* Threading variables */ HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop; HYPRE_Int pass_length; HYPRE_Int *tmp_marker, *tmp_marker_offd; HYPRE_Int *tmp_array, *tmp_array_offd; 
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1); HYPRE_Int * cnt_nz_per_thread; HYPRE_Int * cnt_nz_offd_per_thread; /* HYPRE_Real wall_time; wall_time = hypre_MPI_Wtime(); */ /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]); cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]); for(i=0; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] = 0; cnt_nz_per_thread[i] = 0; } /*----------------------------------------------------------------------- * Access the CSR vectors for A and S. Also get size of fine grid. *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; /* total_global_cpts = 0; */ if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { comm_pkg = hypre_ParCSRMatrixCommPkg(A); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } col_offd_S_to_A = NULL; } if (col_offd_S_to_A) { col_map_offd = col_map_offd_S; num_cols_offd = num_cols_offd_S; } else { col_map_offd = col_map_offd_A; num_cols_offd = num_cols_offd_A; } if (num_cols_offd_A) { A_offd_data = hypre_CSRMatrixData(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); } if (num_cols_offd) S_offd_j = hypre_CSRMatrixJ(S_offd); n_fine = hypre_CSRMatrixNumRows(A_diag); /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ if (n_fine) fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); n_coarse = 0; n_SF = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_fine; i++) if (CF_marker[i] == 1) n_coarse++; else if (CF_marker[i] == -3) n_SF++; pass_array_size = n_fine-n_coarse-n_SF; if (pass_array_size) pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size); pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes+1); if (n_fine) assigned = hypre_CTAlloc(HYPRE_Int, n_fine); P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_coarse) C_array = hypre_CTAlloc(HYPRE_Int, n_coarse); if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); if (send_map_start[num_sends]) int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends]); } index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) int_buf_data[index++] = CF_marker[send_map_elmt[j]]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (num_functions > 1) { index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) int_buf_data[index++] = dof_func[send_map_elmt[j]]; } if (num_procs > 1) { comm_handle = 
hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } n_coarse_offd = 0; n_SF_offd = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_offd; i++) if (CF_marker_offd[i] == 1) n_coarse_offd++; else if (CF_marker_offd[i] == -3) n_SF_offd++; if (num_cols_offd) { assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); new_col_map_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd); } /*----------------------------------------------------------------------- * First Pass: determine the maximal size of P, and elementsPerRow[i]. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Assigned points are points for which we know an interpolation * formula already, and which are thus available to interpolate from. * assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending * in which pass their interpolation formula is determined. * * pass_array contains the points ordered according to its pass, i.e. * | C-points | points of pass 1 | points of pass 2 | .... * C_points are points 0 through pass_pointer[1]-1, * points of pass k (0 < k < num_passes) are contained in points * pass_pointer[k] through pass_pointer[k+1]-1 of pass_array . * * pass_array is also used to avoid going through all points for each pass, * i,e. at the bginning it contains all points in descending order starting * with n_fine-1. Then starting from the last point, we evaluate whether * it is a C_point (pass 0). If it is the point is brought to the front * and the length of the points to be searched is shortened. 
This is * done until the parameter cnt (which determines the first point of * pass_array to be searched) becomes n_fine. Then all points have been * assigned a pass number. *-----------------------------------------------------------------------*/ cnt = 0; p_cnt = pass_array_size-1; P_diag_i[0] = 0; P_offd_i[0] = 0; for (i = 0; i < n_fine; i++) { if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt; /* this C point is assigned index coarse_counter on coarse grid, and in column of P */ C_array[cnt++] = i; assigned[i] = 0; P_diag_i[i+1] = 1; /* one element in row i1 of P */ P_offd_i[i+1] = 0; } else if (CF_marker[i] == -1) { pass_array[p_cnt--] = i; P_diag_i[i+1] = 0; P_offd_i[i+1] = 0; assigned[i] = -1; fine_to_coarse[i] = -1; } else { P_diag_i[i+1] = 0; P_offd_i[i+1] = 0; assigned[i] = -1; fine_to_coarse[i] = -1; } } index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) { int_buf_data[index] = fine_to_coarse[send_map_elmt[j]]; if (int_buf_data[index] > -1) int_buf_data[index] += my_first_cpt; index++; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } new_recv_vec_start = hypre_CTAlloc(HYPRE_Int,num_recvs+1); if (n_coarse_offd) C_array_offd = hypre_CTAlloc(HYPRE_Int,n_coarse_offd); cnt = 0; new_recv_vec_start[0] = 0; for (j = 0; j < num_recvs; j++) { for (i = recv_vec_start[j]; i < recv_vec_start[j+1]; i++) { if (CF_marker_offd[i] == 1) { map_S_to_new[i] = cnt; C_array_offd[cnt] = i; new_col_map_offd[cnt++] = fine_to_coarse_offd[i]; assigned_offd[i] = 0; } else { assigned_offd[i] = -1; map_S_to_new[i] = -1; } } new_recv_vec_start[j+1] = cnt; } cnt = 0; hypre_TFree(fine_to_coarse_offd); if (col_offd_S_to_A) { map_A_to_S = hypre_CTAlloc(HYPRE_Int,num_cols_offd_A); for (i=0; i < num_cols_offd_A; i++) { if (cnt < num_cols_offd && col_map_offd_A[i] == col_map_offd[cnt]) map_A_to_S[i] = cnt++; else 
map_A_to_S[i] = -1; } } /*----------------------------------------------------------------------- * Mark all local neighbors of C points as 'assigned'. *-----------------------------------------------------------------------*/ pass_pointer[0] = 0; pass_pointer[1] = 0; total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */ total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */ cnt = 0; cnt_offd = 0; cnt_nz = 0; cnt_nz_offd = 0; for (i = pass_array_size-1; i > cnt-1; i--) { i1 = pass_array[i]; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_i[i1+1]++; cnt_nz++; assigned[i1] = 1; } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_i[i1+1]++; cnt_nz_offd++; assigned[i1] = 1; } } if (assigned[i1] == 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; } } pass_pointer[2] = cnt; /*----------------------------------------------------------------------- * All local neighbors are assigned, now need to exchange the boundary * info for assigned strong neighbors. *-----------------------------------------------------------------------*/ index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } /*----------------------------------------------------------------------- * Now we need to determine strong neighbors of points of pass 1, etc. 
* we need to update assigned_offd after each pass *-----------------------------------------------------------------------*/ pass = 2; local_pass_array_size = pass_array_size - cnt; hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm); while (global_pass_array_size && pass < max_num_passes) { for (i = pass_array_size-1; i > cnt-1; i--) { i1 = pass_array[i]; no_break = 1; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; no_break = 0; break; } } if (no_break) { for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; break; } } } } /*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/ pass++; pass_pointer[pass] = cnt; local_pass_array_size = pass_array_size - cnt; hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm); index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } hypre_TFree(int_buf_data); num_passes = pass; P_diag_pass = hypre_CTAlloc(HYPRE_Int*,num_passes); /* P_diag_pass[i] will contain all column numbers for points of pass i */ P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int,cnt_nz); P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine); /* P_diag_start[i] contains pointer to begin of column numbers in P_pass for point i, P_diag_i[i+1] contains number of columns for point i */ P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine); if (num_procs > 1) { P_offd_pass = hypre_CTAlloc(HYPRE_Int*,num_passes); if (cnt_nz_offd) 
P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int,cnt_nz_offd); else P_offd_pass[1] = NULL; new_elmts = hypre_CTAlloc(HYPRE_Int*,num_passes); new_counter = hypre_CTAlloc(HYPRE_Int, num_passes+1); new_counter[0] = 0; new_counter[1] = n_coarse_offd; new_num_cols_offd = n_coarse_offd; new_elmts[0] = new_col_map_offd; } /*----------------------------------------------------------------------- * Pass 1: now we consider points of pass 1, with strong C_neighbors, *-----------------------------------------------------------------------*/ cnt_nz = 0; cnt_nz_offd = 0; /* JBS: Possible candidate for threading */ for (i=pass_pointer[1]; i < pass_pointer[2]; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; } } } total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; if (num_procs > 1) { tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1); Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*,num_passes); Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*,num_passes); Pext_pass = hypre_CTAlloc(HYPRE_Int*,num_passes); Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd+1); if (num_cols_offd) Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd); if (send_map_start[num_sends]) P_ncols = hypre_CTAlloc(HYPRE_Int,send_map_start[num_sends]); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_offd+1; i++) { Pext_i[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < send_map_start[num_sends]; i++) { P_ncols[i] = 0; } } old_Pext_send_size = 0; old_Pext_recv_size = 0; for (pass=2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_map_start[pass] = 
hypre_CTAlloc(HYPRE_Int, num_sends+1); Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs+1); Pext_send_size = 0; Pext_send_map_start[pass][0] = 0; for (i=0; i < num_sends; i++) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE #endif for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1]; Pext_send_size += P_ncols[j]; } } Pext_send_map_start[pass][i+1] = Pext_send_size; } comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg, P_ncols, &Pext_i[1]); hypre_ParCSRCommHandleDestroy(comm_handle); if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_buffer); Pext_send_buffer = hypre_CTAlloc(HYPRE_Int, Pext_send_size); } old_Pext_send_size = Pext_send_size; } cnt_offd = 0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_buffer[cnt_offd++] = my_first_cpt +P_diag_pass[pass-1][k]; } j_start = P_offd_start[j1]; j_end = j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; k3 = 0; while (k3 < pass-1) { if (k1 < new_counter[k3+1]) { k2 = k1-new_counter[k3]; Pext_send_buffer[cnt_offd++] = new_elmts[k3][k2]; break; } k3++; } } } } } if (num_procs > 1) { Pext_recv_size = 0; Pext_recv_vec_start[pass][0] = 0; cnt_offd = 0; for (i=0; i < num_recvs; i++) { for (j=recv_vec_start[i]; j<recv_vec_start[i+1]; j++) { if (assigned_offd[j] == pass-1) { Pext_start[j] = cnt_offd; cnt_offd += Pext_i[j+1]; } } Pext_recv_size = cnt_offd; Pext_recv_vec_start[pass][i+1] = Pext_recv_size; } hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; 
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; if (Pext_recv_size) { Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size); new_elmts[pass-1] = hypre_CTAlloc(HYPRE_Int,Pext_recv_size); } else { Pext_pass[pass] = NULL; new_elmts[pass-1] = NULL; } comm_handle = hypre_ParCSRCommHandleCreate (11, tmp_comm_pkg, Pext_send_buffer, Pext_pass[pass]); hypre_ParCSRCommHandleDestroy(comm_handle); if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(loc); loc = hypre_CTAlloc(HYPRE_Int,Pext_recv_size); } old_Pext_recv_size = Pext_recv_size; } cnt_new = 0; cnt_offd = 0; /* JBS: Possible candidate for threading */ for (i=0; i < num_recvs; i++) { for (j=recv_vec_start[i]; j < recv_vec_start[i+1]; j++) { if (assigned_offd[j] == pass-1) { for (j1 = cnt_offd; j1 < cnt_offd+Pext_i[j+1]; j1++) { k1 = Pext_pass[pass][j1]; k2 = k1 - my_first_cpt; if (k2 > -1 && k2 < n_coarse) { Pext_pass[pass][j1] = -k2-1; } else { not_found = 1; k3 = 0; while (k3 < pass-1 && not_found) { k2 = hypre_BinarySearch(new_elmts[k3], k1, (new_counter[k3+1]-new_counter[k3])); if (k2 > -1) { Pext_pass[pass][j1] = k2 + new_counter[k3]; not_found = 0; } else { k3++; } } if (not_found) { new_elmts[pass-1][cnt_new] = Pext_pass[pass][j1]; loc[cnt_new++] = j1; } } } cnt_offd += Pext_i[j+1]; } } } if (cnt_new) { hypre_qsort2i(new_elmts[pass-1],loc,0,cnt_new-1); cnt = 0; local_index = new_counter[pass-1]; Pext_pass[pass][loc[0]] = local_index; for (i=1; i < cnt_new; i++) { if (new_elmts[pass-1][i] > new_elmts[pass-1][cnt]) { new_elmts[pass-1][++cnt] = new_elmts[pass-1][i]; local_index++; } Pext_pass[pass][loc[i]] = local_index; } new_counter[pass] = local_index+1; } else if (num_procs > 1) new_counter[pass] = new_counter[pass-1]; if (new_num_cols_offd < local_index+1) { new_num_cols_offd = 
local_index+1; } pass_length = pass_pointer[pass+1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd) #endif { /* Thread by computing the sparsity structure for this pass only over * each thread's range of rows. Rows are divided up evenly amongst * the threads. The necessary thread-wise temporary arrays, like * P_marker, are initialized and de-allocated internally to the * parallel region. */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_length; } else { thread_stop = (pass_length/num_threads)*(my_thread_num+1); } thread_start += pass_pointer[pass]; thread_stop += pass_pointer[pass]; /* Local initializations */ cnt_nz = 0; cnt_nz_offd = 0; /* This block of code is to go to the top of the parallel region starting before * the loop over num_passes. 
*/ P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse); /* marks points to see if they're counted */ for (i=0; i < n_coarse; i++) { P_marker[i] = -1; } if (new_num_cols_offd == local_index+1) { P_marker_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd); for (i=0; i < new_num_cols_offd; i++) { P_marker_offd[i] = -1; } } else if (n_coarse_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd); for (i=0; i < n_coarse_offd; i++) { P_marker_offd[i] = -1; } } /* Need some variables to store each threads cnt_nz and cnt_nz_offd, and * then stitch things together as in par_interp.c * This loop writes * P_diag_i, P_offd_i: data parallel here, and require no special treatment * P_diag_start, P_offd_start: are not data parallel, require special treatment */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_diag_pass[pass-1][k]; if (P_marker[k1] != i1) { cnt_nz++; P_diag_i[i1+1]++; P_marker[k1] = i1; } } j_start = P_offd_start[j1]; j_end = j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1+1]++; P_marker_offd[k1] = i1; } } } } j_start = 0; for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1-1] != i1) { cnt_nz++; P_diag_i[i1+1]++; P_marker[-k1-1] = i1; } } else if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1+1]++; P_marker_offd[k1] = i1; } } } } } /* Update P_diag_start, P_offd_start with cumulative * nonzero counts over all threads */ if(my_thread_num == 0) { max_num_threads[0] = num_threads; } 
cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd; cnt_nz_per_thread[my_thread_num] = cnt_nz; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num == 0) { for(i = 1; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i-1]; cnt_nz_per_thread[i] += cnt_nz_per_thread[i-1]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num > 0) { /* update this thread's section of P_diag_start and P_offd_start * with the num of nz's counted by previous threads */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] += cnt_nz_per_thread[my_thread_num-1]; P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num-1]; } } else /* if my_thread_num == 0 */ { /* Grab the nz count for all threads */ cnt_nz = cnt_nz_per_thread[max_num_threads[0]-1]; cnt_nz_offd = cnt_nz_offd_per_thread[max_num_threads[0]-1]; /* Updated total nz count */ total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; /* Allocate P_diag_pass and P_offd_pass for all threads */ P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz); if (cnt_nz_offd) P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd); else if (num_procs > 1) P_offd_pass[pass] = NULL; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* offset cnt_nz and cnt_nz_offd to point to the starting * point in P_diag_pass and P_offd_pass for each thread */ if(my_thread_num > 0) { cnt_nz = cnt_nz_per_thread[my_thread_num-1]; cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num-1]; } else { cnt_nz = 0; cnt_nz_offd = 0; } /* Set P_diag_pass and P_offd_pass */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_diag_pass[pass-1][k]; if (P_marker[k1] != -i1-1) { P_diag_pass[pass][cnt_nz++] = k1; P_marker[k1] = -i1-1; } } j_start = P_offd_start[j1]; j_end = 
j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; if (P_marker_offd[k1] != -i1-1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1-1; } } } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1-1] != -i1-1) { P_diag_pass[pass][cnt_nz++] = -k1-1; P_marker[-k1-1] = -i1-1; } } else if (P_marker_offd[k1] != -i1-1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1-1; } } } } } hypre_TFree(P_marker); if ( (n_coarse_offd) || (new_num_cols_offd == local_index+1) ) { hypre_TFree(P_marker_offd); } } /* End parallel region */ } hypre_TFree(loc); hypre_TFree(P_ncols); hypre_TFree(Pext_send_buffer); hypre_TFree(new_recv_vec_start); hypre_TFree(cnt_nz_per_thread); hypre_TFree(cnt_nz_offd_per_thread); hypre_TFree(max_num_threads); P_diag_j = hypre_CTAlloc(HYPRE_Int,total_nz); P_diag_data = hypre_CTAlloc(HYPRE_Real,total_nz); if (total_nz_offd) { P_offd_j = hypre_CTAlloc(HYPRE_Int,total_nz_offd); P_offd_data = hypre_CTAlloc(HYPRE_Real,total_nz_offd); } for (i=0; i < n_fine; i++) { P_diag_i[i+1] += P_diag_i[i]; P_offd_i[i+1] += P_offd_i[i]; } /* determine P for coarse points */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_coarse; i++) { i1 = C_array[i]; P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1]; P_diag_data[P_diag_i[i1]] = 1.0; } if (weight_option) /*if this is set, weights are separated into negative and positive offdiagonals and accumulated accordingly */ { pass_length = pass_pointer[2]-pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta) #endif { /* Sparsity structure 
is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ P_marker = hypre_CTAlloc(HYPRE_Int,n_fine); for (i=0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); for (i=0; i < num_cols_offd; i++) P_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); } /* determine P for points of pass 1, i.e. neighbors of coarse points */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_pos = 0; sum_C_neg = 0; sum_N_pos = 0; sum_N_neg = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; P_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) sum_N_neg += A_diag_data[j]; else sum_N_pos += A_diag_data[j]; } if (j1 != -1 && P_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; if (A_diag_data[j] < 0) sum_C_neg += A_diag_data[j]; else sum_C_pos += A_diag_data[j]; } } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; P_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { if (col_offd_S_to_A) j1 = map_A_to_S[A_offd_j[j]]; else j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 
1 || dof_func[i1] == dof_func_offd[j1])) { if (A_offd_data[j] < 0) sum_N_neg += A_offd_data[j]; else sum_N_pos += A_offd_data[j]; } if (j1 != -1 && P_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; if (A_offd_data[j] < 0) sum_C_neg += A_offd_data[j]; else sum_C_pos += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg*diagonal) alfa = -sum_N_neg/(sum_C_neg*diagonal); if (sum_C_pos*diagonal) beta = -sum_N_pos/(sum_C_pos*diagonal); for (j=P_diag_i[i1]; j < cnt; j++) if (P_diag_data[j] < 0) P_diag_data[j] *= alfa; else P_diag_data[j] *= beta; for (j=P_offd_i[i1]; j < cnt_offd; j++) if (P_offd_data[j] < 0) P_offd_data[j] *= alfa; else P_offd_data[j] *= beta; } hypre_TFree(P_marker); if (num_cols_offd) { hypre_TFree(P_marker_offd); } } /* End Parallel Region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; /*if (!col_offd_S_to_A) hypre_TFree(map_A_to_new);*/ if (n_coarse) hypre_TFree(C_array); hypre_TFree(C_array_offd); hypre_TFree(P_diag_pass[1]); if (num_procs > 1) hypre_TFree(P_offd_pass[1]); for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data); Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { j_start = P_diag_i[j1]; j_end = P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; 
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data); Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass]); hypre_TFree(Pext_recv_vec_start[pass]); } pass_length = pass_pointer[pass+1]-pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. 
*/ P_marker = hypre_CTAlloc(HYPRE_Int,n_fine); for (i=0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); for (i=0; i < num_cols_offd; i++) P_marker_offd[i] = -1; } C_array = NULL; C_array_offd = NULL; if (n_coarse) { C_array = hypre_CTAlloc(HYPRE_Int, n_coarse); } if (new_num_cols_offd > n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd); } else if (n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd); } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); } /* Loop over each thread's row-range */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_neg = 0; sum_C_pos = 0; sum_N_neg = 0; sum_N_pos = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; cnt = P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; C_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; C_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) P_marker[j1] = i1; } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) P_marker_offd[j1] = i1; } for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (P_marker[j1] == i1) { for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j]*P_diag_data[k]; P_diag_data[C_array[k1]] += 
alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j]*P_offd_data[k]; P_offd_data[C_array_offd[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) sum_N_neg += A_diag_data[j]; else sum_N_pos += A_diag_data[j]; } } } for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { if (col_offd_S_to_A) j1 = map_A_to_S[A_offd_j[j]]; else j1 = A_offd_j[j]; if (j1 > -1 && P_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j]*Pext_data[k]; if (k1 < 0) P_diag_data[C_array[-k1-1]] += alfa; else P_offd_data[C_array_offd[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) { if ( A_offd_data[j] < 0) sum_N_neg += A_offd_data[j]; else sum_N_pos += A_offd_data[j]; } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg*diagonal) alfa = -sum_N_neg/(sum_C_neg*diagonal); if (sum_C_pos*diagonal) beta = -sum_N_pos/(sum_C_pos*diagonal); for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++) if (P_diag_data[j] < 0) P_diag_data[j] *= alfa; else P_diag_data[j] *= beta; for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++) if (P_offd_data[j] < 0) P_offd_data[j] *= alfa; else P_offd_data[j] *= beta; } hypre_TFree(C_array); hypre_TFree(C_array_offd); hypre_TFree(P_marker); if (num_cols_offd) { hypre_TFree(P_marker_offd); } } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass]); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass]); hypre_TFree(Pext_pass[pass]); } } /* End num_passes for-loop */ } else /* no distinction 
between positive and negative offdiagonal element */ { pass_length = pass_pointer[2]-pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int,n_fine); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); } for (i=0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i=0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); } /* determine P for points of pass 1, i.e. 
neighbors of coarse points */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; tmp_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) sum_N += A_diag_data[j]; if (j1 != -1 && tmp_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; sum_C += A_diag_data[j]; } } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; tmp_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { if (col_offd_S_to_A) j1 = map_A_to_S[A_offd_j[j]]; else j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) sum_N += A_offd_data[j]; if (j1 != -1 && tmp_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; sum_C += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C*diagonal) alfa = -sum_N/(sum_C*diagonal); for (j=P_diag_i[i1]; j < cnt; j++) P_diag_data[j] *= alfa; for (j=P_offd_i[i1]; j < cnt_offd; j++) P_offd_data[j] *= alfa; } hypre_TFree(tmp_marker); hypre_TFree(tmp_marker_offd); } /* end OMP parallel region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; if (n_coarse) hypre_TFree(C_array); hypre_TFree(C_array_offd); hypre_TFree(P_diag_pass[1]); if (num_procs > 1) hypre_TFree(P_offd_pass[1]); for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data); Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size); } old_Pext_send_size = Pext_send_size; cnt_offd = 
0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { j_start = P_diag_i[j1]; j_end = P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data); Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass]); hypre_TFree(Pext_recv_vec_start[pass]); } pass_length = pass_pointer[pass+1]-pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. 
*/ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int,n_fine); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); } tmp_array = NULL; if (n_coarse) { tmp_array = hypre_CTAlloc(HYPRE_Int,n_coarse); } tmp_array_offd = NULL; if (new_num_cols_offd > n_coarse_offd) { tmp_array_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd); } else { tmp_array_offd = hypre_CTAlloc(HYPRE_Int,n_coarse_offd);} for (i=0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i=0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); } for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; cnt = P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; tmp_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; tmp_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) tmp_marker[j1] = i1; } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) tmp_marker_offd[j1] = i1; } for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (tmp_marker[j1] == i1) { for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j]*P_diag_data[k]; 
P_diag_data[tmp_array[k1]] += alfa; sum_C += alfa; sum_N += alfa; } for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j]*P_offd_data[k]; P_offd_data[tmp_array_offd[k1]] += alfa; sum_C += alfa; sum_N += alfa; } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) sum_N += A_diag_data[j]; } } for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { if (col_offd_S_to_A) j1 = map_A_to_S[A_offd_j[j]]; else j1 = A_offd_j[j]; if (j1 > -1 && tmp_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j]*Pext_data[k]; if (k1 < 0) P_diag_data[tmp_array[-k1-1]] += alfa; else P_offd_data[tmp_array_offd[k1]] += alfa; sum_C += alfa; sum_N += alfa; } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) sum_N += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C*diagonal) alfa = -sum_N/(sum_C*diagonal); for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++) P_diag_data[j] *= alfa; for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++) P_offd_data[j] *= alfa; } hypre_TFree(tmp_marker); hypre_TFree(tmp_marker_offd); hypre_TFree(tmp_array); hypre_TFree(tmp_array_offd); } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass]); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass]); hypre_TFree(Pext_pass[pass]); } } } hypre_TFree(CF_marker_offd); hypre_TFree(Pext_send_map_start); hypre_TFree(Pext_recv_vec_start); hypre_TFree(dof_func_offd); hypre_TFree(Pext_send_data); hypre_TFree(Pext_data); hypre_TFree(P_diag_pass); hypre_TFree(P_offd_pass); hypre_TFree(Pext_pass); hypre_TFree(P_diag_start); hypre_TFree(P_offd_start); hypre_TFree(Pext_start); hypre_TFree(Pext_i); hypre_TFree(fine_to_coarse); hypre_TFree(assigned); hypre_TFree(assigned_offd); hypre_TFree(pass_pointer); hypre_TFree(pass_array); hypre_TFree(map_S_to_new); hypre_TFree(map_A_to_S); if (num_procs > 1) 
hypre_TFree(tmp_comm_pkg); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max and/or keep yat most <P_max_elmts> per row absolutely maximal coefficients */ if (trunc_factor != 0.0 || P_max_elmts != 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); } P_offd_size = P_offd_i[n_fine]; num_cols_offd_P = 0; if (P_offd_size) { if (new_num_cols_offd > num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd); } else { P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < new_num_cols_offd; i++) { P_marker_offd[i] = 0; } num_cols_offd_P = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker_offd[index]) { num_cols_offd_P++; P_marker_offd[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_offd_P); permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes-1]); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < new_counter[num_passes-1]; i++) permute[i] = -1; cnt = 0; for (i=0; i < num_passes-1; i++) { for (j=new_counter[i]; j < new_counter[i+1]; j++) { if (P_marker_offd[j]) { 
col_map_offd_P[cnt] = new_elmts[i][j-new_counter[i]]; permute[j] = col_map_offd_P[cnt++]; } } } hypre_qsort0(col_map_offd_P,0,num_cols_offd_P-1); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,k1) HYPRE_SMP_SCHEDULE #endif for (i=0; i < new_counter[num_passes-1]; i++) { k1 = permute[i]; if (k1 != -1) permute[i] = hypre_BinarySearch(col_map_offd_P,k1,num_cols_offd_P); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) { P_offd_j[i] = permute[P_offd_j[i]]; } hypre_TFree(P_marker_offd); } if (num_procs > 1) { for (i=0; i < num_passes-1; i++) hypre_TFree(new_elmts[i]); } hypre_TFree(permute); hypre_TFree(new_elmts); hypre_TFree(new_counter); if (num_cols_offd_P) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P; } if (n_SF) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; } if (num_procs > 1) { hypre_MatvecCommPkgCreate(P); } *P_ptr = P; /* wall_time = hypre_MPI_Wtime() - wall_time; hypre_printf("TOTAL TIME %1.2e \n",wall_time); */ /*----------------------------------------------------------------------- * Build and return dof_func array for coarse grid. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Free mapping vector and marker array. *-----------------------------------------------------------------------*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] += hypre_MPI_Wtime(); #endif return(0); }
ordering_op-inl.h
#include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file ordering_op-inl.h * \brief Function definition of ordering operators */ #ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_ #define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_ #include <mxnet/operator_util.h> #include <dmlc/optional.h> #include <mshadow/tensor.h> #include <algorithm> #include <vector> #include <type_traits> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "./sort_op.h" #include "./indexing_op.h" namespace mshadow { template<typename xpu, int src_dim, typename DType, int dst_dim> inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src, Shape<dst_dim> target_shape) { CHECK_EQ(src.CheckContiguous(), true); return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_); } }; namespace mxnet { namespace op { // These enums are only visible within this header namespace topk_enum { enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth}; } // topk_enum struct TopKParam : public dmlc::Parameter<TopKParam> { dmlc::optional<int> axis; int k; int ret_typ; bool is_ascend; int dtype; DMLC_DECLARE_PARAMETER(TopKParam) { 
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose the top k indices." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(k).set_default(1) .describe("Number of top elements to select," " should be always smaller than or equal to the element number in the given axis." " A global sort is performed if set k < 1."); DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices) .add_enum("value", topk_enum::kReturnValue) .add_enum("indices", topk_enum::kReturnIndices) .add_enum("mask", topk_enum::kReturnMask) .add_enum("both", topk_enum::kReturnBoth) .describe("The return type.\n" " \"value\" means to return the top k values," " \"indices\" means to return the indices of the top k values," " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values." " \"both\" means to return a list of both values and indices of top k elements."); DMLC_DECLARE_FIELD(is_ascend).set_default(false) .describe("Whether to choose k largest or k smallest elements." " Top K largest elements will be chosen if set to false."); DMLC_DECLARE_FIELD(dtype) .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices when ret_typ is \"indices\" or \"both\". " "An error will be raised if the selected data type cannot precisely represent the " "indices."); } }; struct SortParam : public dmlc::Parameter<SortParam> { dmlc::optional<int> axis; bool is_ascend; DMLC_DECLARE_PARAMETER(SortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose sort the input tensor." " If not given, the flattened array is used. 
Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); } }; struct ArgSortParam : public dmlc::Parameter<ArgSortParam> { dmlc::optional<int> axis; bool is_ascend; int dtype; DMLC_DECLARE_PARAMETER(ArgSortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to sort the input tensor." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); DMLC_DECLARE_FIELD(dtype) .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or" " \"both\". An error will be raised if the selected data type cannot precisely " "represent the indices."); } }; inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape, int *batch_size, int *element_num, int *axis, int *k, bool *do_transpose, bool *is_ascend) { *do_transpose = false; *k = param.k; *is_ascend = param.is_ascend; // get batch_size, axis and element_num if (!static_cast<bool>(param.axis)) { // No axis given *axis = 0; *batch_size = 1; *element_num = src_shape.Size(); } else { *axis = param.axis.value(); if (*axis < 0) { *axis += src_shape.ndim(); } CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim())) << "Invalid axis! 
axis should be between 0 and " << src_shape.ndim() << ", found axis=" << *axis; *batch_size = src_shape.Size() / src_shape[*axis]; *element_num = src_shape[*axis]; if (*axis != static_cast<int>(src_shape.ndim()) - 1) { *do_transpose = true; } } // get k if (param.k <= 0) { *k = *element_num; } // get target_shape if (!static_cast<bool>(param.axis)) { if (param.ret_typ != topk_enum::kReturnMask) { *target_shape = mshadow::Shape1(*k); } else { *target_shape = src_shape; } } else { *target_shape = src_shape; if (param.ret_typ != topk_enum::kReturnMask) { (*target_shape)[*axis] = *k; } } CHECK(*k >= 1 && *k <= *element_num) << "k must be smaller than " << *element_num << ", get k = " << *k; } using namespace mshadow; struct fill_ind_to_one { template<typename DType> MSHADOW_XINLINE static void Map(int i, const int* indices, DType* out) { out[indices[i]] = static_cast<DType>(1); } }; struct fill_ind { template<typename DType> MSHADOW_XINLINE static void Map(int i, const int* indices, const DType* val, int req, DType* out) { KERNEL_ASSIGN(out[indices[i]], req, val[i]); } }; template<typename DType> MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat, const Tensor<cpu, 1, int>& ind, const Tensor<cpu, 1, char>& work, int K, int N, bool is_ascend, Stream<cpu> *s) { // Use full sort when K is relatively large. const bool full_sort(K*8 > N); // Batch size. const int M(work.size(0)/(sizeof(DType)*N)); const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()); #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < M; ++i) { // Tensor `work` stores the flattened source data, while `dat` stores the sorted result. 
// NOTE(review): `indices` holds global positions into the flattened data
// (the batch offset is already baked in), so `vals` needs no per-batch offset.
DType *vals = reinterpret_cast<DType*>(work.dptr_);
DType *sorted_vals = dat.dptr_+i*N;   // destination for this batch's sorted values
int *indices = ind.dptr_+i*N;         // index permutation for this batch item
if (is_ascend) {
  if (full_sort) {
    std::sort(indices, indices+N,
              [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
  } else {
    // Only the first K slots need to end up in order.
    std::partial_sort(indices, indices+K, indices+N,
                      [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
  }
} else {
  if (full_sort) {
    std::sort(indices, indices+N,
              [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
  } else {
    std::partial_sort(indices, indices+K, indices+N,
                      [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
  }
}
// Gather the K selected values in their sorted order.
for (int j = 0; j < K; ++j) {
  sorted_vals[j] = vals[indices[j]];
}
}
}
#ifdef __HIPCC__
/* Returns true when (val1, ind1) ranks before (val2, ind2) in the requested
   order. */
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1,
                                 DType val2, int ind2, bool is_ascend) {
  // Negative indices denote undefined values which are considered arbitrary small resp. large.
  return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) ||
                                      (!is_ascend && val1 > val2)));
}
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1,
                               DType *val2, int *ind2, bool is_ascend) {
  // In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
  // [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
  int i1(K-1), i2(K-1);
  for (int i = 0; i < K; ++i) {
    if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
      --i2;
    } else {
      --i1;
    }
  }
  // Now merge the lists from back to front.
  for (int i = K; i--;) {
    // Place the worse of the two current candidates at the back slot `i`.
    if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
      val1[i] = val1[i1];
      ind1[i] = ind1[i1];
      --i1;
    } else {
      val1[i] = val2[i2];
      ind1[i] = ind2[i2];
      --i2;
    }
  }
}
/* Kernel: each block selects the top-K elements of one batch item of length N;
   per-thread candidate lists live in dynamic shared memory (small K assumed). */
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
  // Buffer for pairwise reduction.
  HIP_DYNAMIC_SHARED( int, buff)
  // Start of buffer sections associated with this thread.
const int offset(threadIdx.x*K); int *ind_buff = &buff[offset]; DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset; // Initialize top-K values for this thread. for (int i = 0; i < K; ++i) { ind_buff[i] = -1; } // Range of values this thread cares about. Each thread block processes // a different batch item (i.e. a different set of ind/val where we // have to select the top-K elements). All threads within the same // block work on the same batch item. const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N); // Select top-K from this range and store it sorted in the buffer. // We assume a small K, so linear insertion is o.k. for (int i = first; i < last; i += blockDim.x) { DType cur_val(val[i]); int cur_ind(ind[i]); for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) { if (j+1 < K) { val_buff[j+1] = val_buff[j]; ind_buff[j+1] = ind_buff[j]; } val_buff[j] = cur_val; ind_buff[j] = cur_ind; } } // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not // necessary a power of two, therefore the additional checks for last_s. for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x; last_s > 1; last_s = s, s = (s+1)/2) { __syncthreads(); if (threadIdx.x < s && threadIdx.x+s < last_s) { MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend); } } // Final updates on master thread. if (threadIdx.x == 0) { for (int i = 0; i < K; ++i) { ind[blockIdx.x*N+i] = ind_buff[i]; val[blockIdx.x*N+i] = val_buff[i]; } } } template<typename DType> MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat, const Tensor<gpu, 1, int>& ind, const Tensor<gpu, 1, char>& work, int K, int N, bool is_ascend, Stream<gpu> *s) { // Use full sort for all but very small K for which we // can do a partial sort entirely within shared memory. const bool full_sort(K > 5); // Batch size. const int M(dat.size(0)/N); if (full_sort) { // Divide workspace into two parts. 
The first one is needed to store batch ids. size_t alignment = std::max(sizeof(DType), sizeof(int)); size_t id_size = PadBytes(sizeof(int) * ind.size(0), alignment); Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s); Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s); mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work); if (M > 1) { // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort. batch_id = ind / N; mxnet::op::SortByKey(batch_id, dat, true, &sort_work); batch_id = ind / N; mxnet::op::SortByKey(batch_id, ind, true, &sort_work); } } else { const int nthreads(mshadow::cuda::kBaseThreadNum); hipLaunchKernelGGL((PartialSortSmallK), dim3(M), dim3(nthreads), nthreads*K*(sizeof(int)+sizeof(DType)), mshadow::Stream<gpu>::GetStream(s), K, N, dat.dptr_, ind.dptr_, is_ascend); } } #endif /*! * \brief Implementation of the TopK operation * * * \param ctx the running context * \param resource temporary resource handler * \param src the Source blob * \param ret the destination blobs * \param k the K elements to keep * \param param the topk parameters * \tparam xpu the device type. * \tparam DType type of the output value/mask. * \tparam IDType type of the output indices. */ template<typename xpu, typename DType, typename IDType> void TopKImpl(const RunContext &ctx, const Resource &resource, const std::vector<OpReqType>& req, const TBlob& src, const std::vector<TBlob>& ret, const TopKParam& param) { using namespace mshadow; using namespace mshadow::expr; // 1. 
Parse and initialize information Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 1, char> workspace; Tensor<xpu, 1, char> temp_workspace; Tensor<xpu, 1, DType> sorted_dat; Tensor<xpu, 1, int> indices, sel_indices; int batch_size, element_num; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; int k = 0; size_t alignment = std::max(sizeof(DType), sizeof(int)); TShape target_shape; ParseTopKParam(src.shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>()) << "'IDType' does not have a sufficient precision to represent the indices of the input array. " << "The total element_num is " << element_num << ", but the selected IDType can only represent " << mxnet::common::MaxIntegerValue<IDType>() << " elements"; Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s); size_t temp_size = 0; // Temp space needed by the gpu-based full sorts. temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size())); temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, DType, xpu>(src.Size())); temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<DType, int, xpu>(src.Size())); // Additional temp space for gpu full sorts for batch ids. temp_size += PadBytes(sizeof(int) * src.Size(), alignment); // Temp space for cpu sorts. 
temp_size = std::max(temp_size, static_cast<size_t>(sizeof(DType) * src.Size()));
// Total workspace: temp sort buffer + sorted values + index permutation
// (+ selected indices when a mask output is requested).
size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                                  + PadBytes(sizeof(int) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
  workspace_size += PadBytes(sizeof(int) * batch_size * k, alignment);
}
workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
// Carve the single allocation into aligned sub-buffers by bumping this pointer.
char* workspace_curr_ptr = workspace.dptr_;
sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                   Shape1(src.Size()), s);  // contain sorted dat
workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                              Shape1(src.Size()), s);  // indices in the original matrix
workspace_curr_ptr += PadBytes(sizeof(int) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
  sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                    Shape1(batch_size * k), s);
  workspace_curr_ptr += PadBytes(sizeof(int) * batch_size * k, alignment);
  CHECK_EQ(sel_indices.CheckContiguous(), true);
}
if (std::is_same<xpu, cpu>::value) {
  // CPU path: `temp_workspace` aliases a flat (possibly transposed) copy of the
  // source data, which the CPU TopKSort reads while writing into `sorted_dat`.
  Tensor<xpu, 1, DType> flattened_data;
  if (do_transpose) {
    flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                           Shape1(src.Size()), s);
    workspace_curr_ptr += sizeof(DType) * src.Size();
    // Move the sort axis to the end before flattening.
    flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    CHECK_EQ(flattened_data.CheckContiguous(), true);
  } else {
    flattened_data = src.FlatTo1D<xpu, DType>(s);
  }
  // `temp_workspace` stores the flattened data
  temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                        Shape1(sizeof(DType)*src.Size()), s);
  CHECK_EQ(temp_workspace.CheckContiguous(), true);
} else {
  // GPU path: copy (and transpose if needed) the source into `sorted_dat`,
  // which is then sorted in place.
  if (do_transpose) {
    sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
  } else {
    sorted_dat = reshape(dat, Shape1(src.Size()));
  }
  CHECK_EQ(sorted_dat.CheckContiguous(), true);
temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space workspace_curr_ptr += temp_size; } mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1, kWriteTo, indices.dptr_); CHECK_EQ(indices.CheckContiguous(), true); // 2. Perform inplace batch sort. // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat` // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as // a temporal buffer for GPU device. TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s); // 3. Assign results to the ret blob // When returning indices, only update(modulo) required elements instead of full elements // to avoid redundant calculation. // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num // is large enough. if (param.ret_typ == topk_enum::kReturnMask) { Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s); ret_mask = scalar<DType>(0); sel_indices = reshape(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), Shape1(batch_size * k)); if (do_transpose) { TShape src_shape = src.shape_.FlatTo3D(axis); CHECK_EQ(sel_indices.CheckContiguous(), true); sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } if (req[0] == kNullOp) { return; } else if (req[0] == kWriteTo) { mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k, sel_indices.dptr_, ret_mask.dptr_); } else { LOG(FATAL) << "req=" << req[0] << " is not supported yet."; } } else if (param.ret_typ == topk_enum::kReturnIndices) { if (do_transpose) { Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s); ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose( slice<2>(inplace_reshape(indices, 
Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)), 0, k), Shape3(0, 2, 1)), element_num))); } else { Tensor<xpu, 2, IDType> ret_indices = ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s); ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num))); } } else { if (do_transpose) { Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s); Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s); ASSIGN_DISPATCH(ret_value, req[0], transpose( slice<2>(inplace_reshape(sorted_dat, Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)), 0, k), Shape3(0, 2, 1))); ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose( slice<2>(inplace_reshape(indices, Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)), 0, k), Shape3(0, 2, 1)), element_num))); } else { Tensor<xpu, 2, DType> ret_value = ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s); Tensor<xpu, 2, IDType> ret_indices = ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s); ASSIGN_DISPATCH(ret_value, req[0], slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k)); ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num))); } } } template<typename xpu> void TopK(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) { MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { MSHADOW_TYPE_SWITCH(param.dtype, IDType, { TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, 
param); }) }); } else {
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
  });
}
}

/*!
 * \brief Forward entry of the `sort` operator: delegates to TopKImpl with
 * k == 0 (expanded to a full sort by ParseTopKParam) and ret_typ == kReturnValue.
 */
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs,
                              topk_param);
  });
}

/*!
 * \brief Forward entry of the `argsort` operator: delegates to TopKImpl with
 * k == 0 (full sort) and ret_typ == kReturnIndices; the returned indices use
 * param.dtype.
 */
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.dtype = param.dtype;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
      TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs,
                                   topk_param);
    });
  });
}

/*!
 * \brief Backward pass of top-k: scatters the output gradient (inputs[0]) back
 * to the input-gradient positions recorded by the forward indices (inputs[2]).
 * In-place writes are rejected since input and output shapes differ.
 */
template<typename xpu, typename DType, typename IDType>
void TopKBackwardImpl(const OpContext &ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs,
                      const TopKParam& param) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool
is_ascend = false; int k = 0; TShape target_shape; ParseTopKParam(outputs[0].shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>()) << "'IDType' does not have a sufficient precision to represent the indices of the input array. " << "The total element_num is " << element_num << ", but the selected IDType can only represent " << mxnet::common::MaxIntegerValue<IDType>() << " elements"; Tensor<xpu, 1, int> workspace = ctx.requested[0].get_space_typed<xpu, 1, int>(Shape1(batch_size * k + batch_size), s); Tensor<xpu, 1, int> sel_indices = Tensor<xpu, 1, int>(workspace.dptr_, Shape1(batch_size * k), s); Tensor<xpu, 1, int> batch_shift = Tensor<xpu, 1, int>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s); Tensor<xpu, 2, DType> out_grad = inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s); Tensor<xpu, 2, DType> in_grad = outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s); mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0, element_num, kWriteTo, batch_shift.dptr_); if (do_transpose) { Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s); TShape src_shape = outputs[0].shape_.FlatTo3D(axis); sel_indices = reshape(transpose( broadcast_to(inplace_reshape(batch_shift, Shape3(src_shape[0], src_shape[2], 1)), TShape(Shape3(src_shape[0], src_shape[2], k))), Shape3(0, 2, 1)), Shape1(batch_size * k)); sel_indices += tcast<int>(indices); sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } else { Tensor<xpu, 2, IDType> indices = inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s); sel_indices = reshape(tcast<int>(indices) + broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)), TShape(Shape2(batch_size, k))), Shape1(batch_size * k)); } CHECK_EQ(sel_indices.CheckContiguous(), true); if (kWriteTo == req[0] 
|| kAddTo == req[0]) { if (kWriteTo == req[0]) { in_grad = scalar<DType>(0); } mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k, sel_indices.dptr_, out_grad.dptr_, req[0], in_grad.dptr_); } else { LOG(FATAL) << "Not Implemented!"; } } template<typename xpu> void TopKBackward_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnBoth) { MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { MSHADOW_TYPE_SWITCH(param.dtype, IDType, { TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param); }); }); } else if (param.ret_typ == topk_enum::kReturnValue) { MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { TopKBackwardImpl<xpu, DType, int>(ctx, inputs, req, outputs, param); }); } else { LOG(FATAL) << "Not Implemented"; } } inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { return static_cast<uint32_t>(1); } else { return static_cast<uint32_t>(2); } } inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnBoth) { return static_cast<uint32_t>(2); } else { return static_cast<uint32_t>(1); } } inline bool TopKType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); int data_type = -1; size_t in_size = in_attrs->size(); size_t out_size = out_attrs->size(); CHECK_EQ(in_size, 1); CHECK(out_size == 1 || out_size == 2); if (out_size > 1) { if (param.ret_typ == topk_enum::kReturnValue) { CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32)) << "Failed to set the type of 
ret_indices."; } else { CHECK(type_assign(&(*out_attrs)[1], param.dtype)) << "Failed to set the type of ret_indices."; } } if (param.ret_typ == topk_enum::kReturnIndices) { CHECK(type_assign(&(*out_attrs)[0], param.dtype)) << "Failed to set the type of ret_indices."; } else { CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0]; CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0]; CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0]; CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0]; if (data_type == -1) return false; } return true; } inline bool TopKShapeImpl(const TopKParam& param, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { CHECK_EQ(out_attrs->size(), 1U); } else { CHECK_EQ(out_attrs->size(), 2U); } TShape& in_shape = (*in_attrs)[0]; int batch_size, element_num; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; int k = 0; TShape target_shape; ParseTopKParam(in_shape, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape); } else { SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape); SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape); } return true; } inline bool TopKShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); return TopKShapeImpl(param, in_attrs, out_attrs); } inline bool SortType(const nnvm::NodeAttrs& attrs, std::vector<int> 
*in_attrs, std::vector<int> *out_attrs) { int data_type = -1; size_t in_size = in_attrs->size(); size_t out_size = out_attrs->size(); CHECK_EQ(in_size, 1); CHECK_EQ(out_size, 2); CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32)) << "Failed to set the type of ret_indices to int32."; CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0]; CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0]; CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0]; CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0]; if (data_type == -1) return false; return true; } inline bool SortShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { const SortParam& param = nnvm::get<SortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnValue; return TopKShapeImpl(topk_param, in_attrs, out_attrs); } inline bool ArgSortType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); CHECK(type_assign(&(*out_attrs)[0], param.dtype)) << "Failed to set the type of ret_indices to int32."; return true; } inline bool ArgSortShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnIndices; return TopKShapeImpl(topk_param, in_attrs, out_attrs); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
rawSHA384_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 2010 by Solar Designer * based on rawMD4_fmt.c code, with trivial changes by groszek. * * Rewritten Spring 2013, JimF. SSE code added and released with the following terms: * No copyright is claimed, and the software is hereby placed in the public domain. * In case this attempt to disclaim copyright and place the software in the public * domain is deemed null and void, then the software is Copyright (c) 2011 JimF * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rawSHA384; #elif FMT_REGISTERS_H john_register_one(&fmt_rawSHA384); #else #include "arch.h" #include "sha2.h" #include "stdint.h" #include "params.h" #include "common.h" #include "johnswap.h" #include "formats.h" //#undef SIMD_COEF_64 //#undef SIMD_PARA_SHA512 /* * Only effective for SIMD. * Undef to disable reversing steps for benchmarking. 
*/ #define REVERSE_STEPS #ifdef _OPENMP #ifdef SIMD_COEF_64 #ifndef OMP_SCALE #define OMP_SCALE 1024 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #endif #include <omp.h> #endif #include "simd-intrinsics.h" #include "memdbg.h" #define FORMAT_LABEL "Raw-SHA384" #define FORMAT_NAME "" #define FORMAT_TAG "$SHA384$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #ifdef SIMD_COEF_64 #define ALGORITHM_NAME SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #ifdef SIMD_COEF_64 #define PLAINTEXT_LENGTH 111 #else #define PLAINTEXT_LENGTH 125 #endif #define CIPHERTEXT_LENGTH 96 #define BINARY_SIZE DIGEST_SIZE #define DIGEST_SIZE 48 #define DIGEST_SIZE_512 64 #define BINARY_ALIGN 8 #define SALT_SIZE 0 #define SALT_ALIGN 1 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"a8b64babd0aca91a59bdbb7761b421d4f2bb38280d3a75ba0f21f2bebc45583d446c598660c94ce680c47d19c30783a7", "password"}, {"$SHA384$a8b64babd0aca91a59bdbb7761b421d4f2bb38280d3a75ba0f21f2bebc45583d446c598660c94ce680c47d19c30783a7", "password"}, {"$SHA384$8cafed2235386cc5855e75f0d34f103ccc183912e5f02446b77c66539f776e4bf2bf87339b4518a7cb1c2441c568b0f8", "12345678"}, {"$SHA384$38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b", ""}, {"94e75dd8e1f16d7df761d76c021ad98c283791008b98368e891f411fc5aa1a83ef289e348abdecf5e1ba6971604a0cb0", "UPPERCASE"}, {NULL} }; #ifdef SIMD_COEF_64 #define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 ) static ARCH_WORD_64 (*saved_key); static ARCH_WORD_64 (*crypt_out); #else static int (*saved_len); static char 
(*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_64 (*crypt_out)[DIGEST_SIZE / sizeof(ARCH_WORD_64)]; #endif static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifndef SIMD_COEF_64 saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); #else saved_key = mem_calloc_align(self->params.max_keys_per_crypt * SHA_BUF_SIZ, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_out = mem_calloc_align(self->params.max_keys_per_crypt * DIGEST_SIZE_512 / sizeof(ARCH_WORD_64), sizeof(*crypt_out), MEM_ALIGN_SIMD); #endif } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); #ifndef SIMD_COEF_64 MEM_FREE(saved_len); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += 8; q = p; while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q && q - p == CIPHERTEXT_LENGTH; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1]; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); strlwr(out + TAG_LENGTH); return out; } void *get_binary(char *ciphertext) { static ARCH_WORD_64 *outw; unsigned char *out; char *p; int i; if (!outw) outw = mem_calloc_tiny(DIGEST_SIZE, BINARY_ALIGN); out = (unsigned char*)outw; p = ciphertext + TAG_LENGTH; for (i = 0; i < DIGEST_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef SIMD_COEF_64 alter_endianity_to_BE64(out, DIGEST_SIZE/8); #ifdef REVERSE_STEPS sha384_reverse(outw); #endif #endif return out; 
} #ifdef SIMD_COEF_64 #define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64 + 3*SIMD_COEF_64) static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; } static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; } static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; } static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; } static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; } static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; } static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; } #else static int get_hash_0(int index) { return crypt_out[index][3] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][3] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][3] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][3] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][3] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][3] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][3] & PH_MASK_6; } #endif static int binary_hash_0(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_0; } static int binary_hash_1(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_1; } static int binary_hash_2(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_2; } static int binary_hash_3(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_3; } static int binary_hash_4(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_4; } static int binary_hash_5(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_5; } static int binary_hash_6(void *binary) { return ((ARCH_WORD_64*)binary)[3] & PH_MASK_6; } static void set_key(char *key, int index) { #ifdef SIMD_COEF_64 #if ARCH_ALLOWS_UNALIGNED const ARCH_WORD_64 *wkey = 
(ARCH_WORD_64*)key; #else char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint64_t)); const ARCH_WORD_64 *wkey = is_aligned(key, sizeof(uint64_t)) ? (ARCH_WORD_64*)key : (ARCH_WORD_64*)strcpy(buf_aligned, key); #endif ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64*)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]; ARCH_WORD_64 *keybuf_word = keybuffer; unsigned int len; ARCH_WORD_64 temp; len = 0; while((unsigned char)(temp = *wkey++)) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP64((temp & 0xff) | (0x80 << 8)); len++; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP64((temp & 0xffff) | (0x80 << 16)); len+=2; goto key_cleaning; } if (!(temp & 0xff000000)) { *keybuf_word = JOHNSWAP64((temp & 0xffffff) | (0x80ULL << 24)); len+=3; goto key_cleaning; } if (!(temp & 0xff00000000ULL)) { *keybuf_word = JOHNSWAP64((temp & 0xffffffff) | (0x80ULL << 32)); len+=4; goto key_cleaning; } if (!(temp & 0xff0000000000ULL)) { *keybuf_word = JOHNSWAP64((temp & 0xffffffffffULL) | (0x80ULL << 40)); len+=5; goto key_cleaning; } if (!(temp & 0xff000000000000ULL)) { *keybuf_word = JOHNSWAP64((temp & 0xffffffffffffULL) | (0x80ULL << 48)); len+=6; goto key_cleaning; } if (!(temp & 0xff00000000000000ULL)) { *keybuf_word = JOHNSWAP64((temp & 0xffffffffffffffULL) | (0x80ULL << 56)); len+=7; goto key_cleaning; } *keybuf_word = JOHNSWAP64(temp); len += 8; keybuf_word += SIMD_COEF_64; } *keybuf_word = 0x8000000000000000ULL; key_cleaning: keybuf_word += SIMD_COEF_64; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_64; } keybuffer[15*SIMD_COEF_64] = len << 3; #else int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); #endif } static char *get_key(int index) { #ifdef SIMD_COEF_64 unsigned i; ARCH_WORD_64 s; static char out[PLAINTEXT_LENGTH + 1]; unsigned char *wucp = (unsigned char*)saved_key; s = 
((ARCH_WORD_64*)saved_key)[15*SIMD_COEF_64 + (index&(SIMD_COEF_64-1)) + index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] >> 3; for(i = 0; i < (unsigned)s; i++) out[i] = wucp[ GETPOS(i, index) ]; out[i] = 0; return (char*) out; #else saved_key[index][saved_len[index]] = 0; return saved_key[index]; #endif } #ifndef REVERSE_STEPS #undef SSEi_REVERSE_STEPS #define SSEi_REVERSE_STEPS 0 #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { #ifdef SIMD_COEF_64 SIMDSHA512body(&saved_key[index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64], &crypt_out[index/SIMD_COEF_64*8*SIMD_COEF_64], NULL, SSEi_REVERSE_STEPS|SSEi_MIXED_IN|SSEi_CRYPT_SHA384); #else SHA512_CTX ctx; SHA384_Init(&ctx); SHA384_Update(&ctx, saved_key[index], saved_len[index]); SHA384_Final((unsigned char *)crypt_out[index], &ctx); #endif } return count; } static int cmp_all(void *binary, int count) { unsigned int index; for (index = 0; index < count; index++) #ifdef SIMD_COEF_64 if (((ARCH_WORD_64*)binary)[3] == crypt_out[HASH_IDX]) #else if ( ((ARCH_WORD_64*)binary)[0] == crypt_out[index][0] ) #endif return 1; return 0; } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_64 return ((ARCH_WORD_64*)binary)[3] == crypt_out[HASH_IDX]; #else return *(ARCH_WORD_64*)binary == crypt_out[index][0]; #endif } static int cmp_exact(char *source, int index) { ARCH_WORD_64 *binary = get_binary(source); char *key = get_key(index); SHA512_CTX ctx; ARCH_WORD_64 crypt_out[DIGEST_SIZE / sizeof(ARCH_WORD_64)]; SHA384_Init(&ctx); SHA384_Update(&ctx, key, strlen(key)); SHA384_Final((unsigned char*)crypt_out, &ctx); #ifdef SIMD_COEF_64 alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8); #ifdef REVERSE_STEPS sha384_reverse(crypt_out); #endif #endif return !memcmp(binary, crypt_out, DIGEST_SIZE); } struct fmt_main fmt_rawSHA384 = { { FORMAT_LABEL, FORMAT_NAME, "SHA384 " 
ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. 
%
%    o target: the RGB value of the target color.
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
  const MagickBooleanType invert)
{
#define MaxStacksize  524288UL
/*
  Push a horizontal segment [left,right] on row `up` (to be scanned in
  direction `delta`, +1/-1) onto the explicit segment stack; throws when
  the fixed-size stack would overflow.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  MemoryInfo
    *segment_info;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if ((image->matte == MagickFalse) &&
      (draw_info->fill.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.  The clone's opacity channel is used as the
    "visited" mask: filled pixels are marked TransparentOpacity.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  /*
    Classic scanline flood fill: pop a segment, grow it left then right,
    and push the neighboring rows' uncovered spans.
  */
  while (s > segment_stack)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: scan left from x1.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Scan right from x, marking matching pixels.
          */
          if (x < (ssize_t) image->columns)
            {
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
                image->columns-x,1,exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      /*
        Skip over non-matching pixels up to x2 before the next span.
      */
      if (x <= x2)
        {
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane: every marked (transparent) pixel
      in the clone receives the fill color in the target image.
    */
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill.blue));
          if (((channel & OpacityChannel) != 0) ||
              (draw_info->fill.opacity != OpaqueOpacity))
            SetPixelOpacity(q,ClampToQuantum(fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G r a d i e n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
% % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const PixelPacket *start_color, % const PixelPacket *stop_color) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o spread: the gradient spread meathod: pad, reflect, or repeat. % % o start_color: the start color. % % o stop_color: the stop color. % % This provides a good example of making use of the DrawGradientImage % function and the gradient structure in draw_info. % */ MagickExport MagickBooleanType GradientImage(Image *image, const GradientType type,const SpreadMethod method, const PixelPacket *start_color,const PixelPacket *stop_color) { const char *artifact; DrawInfo *draw_info; GradientInfo *gradient; MagickBooleanType status; register ssize_t i; /* Set gradient start-stop end points. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(start_color != (const PixelPacket *) NULL); assert(stop_color != (const PixelPacket *) NULL); draw_info=AcquireDrawInfo(); gradient=(&draw_info->gradient); gradient->type=type; gradient->bounding_box.width=image->columns; gradient->bounding_box.height=image->rows; artifact=GetImageArtifact(image,"gradient:bounding-box"); if (artifact != (const char *) NULL) (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box); gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; artifact=GetImageArtifact(image,"gradient:direction"); if (artifact != (const char *) NULL) { GravityType direction; direction=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,artifact); switch (direction) { case NorthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; 
gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case WestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case EastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case SouthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->rows-1; break; } case SouthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->columns-1; break; } case SouthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; break; } default: break; } } artifact=GetImageArtifact(image,"gradient:angle"); if (artifact != (const char *) NULL) gradient->angle=(MagickRealType) StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"gradient:vector"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf", &gradient->gradient_vector.x1,&gradient->gradient_vector.y1, &gradient->gradient_vector.x2,&gradient->gradient_vector.y2); if 
     ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    /* No explicit geometry given: default linear gradients to vertical. */
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1)*cosine)+
        fabs((double) (image->rows-1)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1),(image->rows-1))/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) (MagickMax((image->columns-1),
            (image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1)*
            (image->columns-1)+(image->rows-1)*(image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1)/2.0;
          gradient->radii.y=(double) (image->rows-1)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) MagickMin((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.
Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o exception: return any errors or warnings in this structure. % */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads, sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) memset(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count, sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **magick_restrict histograms, width; ssize_t y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,0.5); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,0,0,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse) { InheritException(exception,&paint_image->exception); linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict paint_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    register size_t
      *histogram;

    if (status == MagickFalse)
      continue;
    /* Fetch the source row plus a width/2 apron on every side. */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view);
    /* Each worker thread reuses its own intensity histogram. */
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        v;

      /*
        Assign most frequent color: histogram the width x width
        neighborhood by 8-bit intensity and remember the offset (j) of
        a pixel from the winning bin.
      */
      i=0;
      j=0;
      count=0;
      (void) memset(histogram,0,NumberPaintBins*sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
            linear_image,p+u+i)));
          histogram[k]++;
          if (histogram[k] > count)
            {
              j=i+u;
              count=histogram[k];
            }
        }
        /* Advance to the next apron-padded row. */
        i+=(ssize_t) (linear_image->columns+width);
      }
      *q=(*(p+j));
      if (linear_image->colorspace == CMYKColorspace)
        SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,OilPaintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);
  return(paint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O p a q u e P a i n t I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpaquePaintImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,
%        const MagickPixelPacket *target,const MagickPixelPacket *fill,
%        const MagickBooleanType invert)
%      MagickBooleanType OpaquePaintImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const MagickPixelPacket *fill,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s).
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
*/

/*
  Convenience wrapper: paints all composite channels via
  OpaquePaintImageChannel().
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}

MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* The image is modified pixel-by-pixel, so it must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /* Conform fill/target to this image (see ConformMagickPixelPacket). */
  ConformMagickPixelPacket(image,fill,&conform_fill,exception);
  ConformMagickPixelPacket(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
  /* Rows are independent, so the scanline loop is parallelized. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* invert != MagickFalse paints the pixels that do NOT match target. */
      if (IsMagickColorSimilar(&pixel,&conform_target) != invert)
        {
          /* Replace only the channels the caller selected. */
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(conform_fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(conform_fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(conform_fill.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(conform_fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(conform_fill.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* 'progress' is shared across threads; bump it atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const MagickPixelPacket *target,const Quantum opacity,
%        const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* An alpha channel is required before any opacity can be written. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
  /* Rows are independent, so the scanline loop is parallelized. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* invert != MagickFalse paints the pixels that do NOT match target. */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* 'progress' is shared across threads; bump it atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is only one fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of two color components (RGB) can be
%  different.  Thus we define this method to take two target pixels (one
%  low and one high) and all the pixels of an image which are lying between
%  these two pixels are made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    NOTE(review): the sibling TransparentPaintImage() initializes a missing
    alpha channel with OpaqueAlphaChannel; this one uses ResetAlphaChannel.
    Confirm the asymmetry is intentional.
  */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /* Rows are independent, so the scanline loop is parallelized. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* A pixel matches when each of R, G, B lies within [low, high]. */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      /* invert != MagickFalse paints the pixels that do NOT match. */
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* 'progress' is shared across threads; bump it atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
//                     The LLVM37 Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM37_CLANG_AST_OPENMPCLAUSE_H
#define LLVM37_CLANG_AST_OPENMPCLAUSE_H

#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"

namespace clang {

//===----------------------------------------------------------------------===//
// AST classes for clauses.
//
//
///////////////////////////////////////////////////////////////////////////////

/// \brief This is a basic class for representing single OpenMP clause.
///
class OMPClause {
  /// \brief Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the clause.
  SourceLocation EndLoc;
  /// \brief Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// \brief Returns the starting location of the clause.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns the ending location of the clause.
  SourceLocation getLocEnd() const { return EndLoc; }

  /// \brief Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// \brief An implicit clause carries no source location: it was synthesized
  /// by the compiler rather than written by the user.
  bool isImplicit() const { return StartLoc.isInvalid(); }

  StmtRange children();
  ConstStmtRange children() const {
    return const_cast<OMPClause *>(this)->children();
  }
  static bool classof(const OMPClause *) { return true; }
};

/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of variables in the list.
  unsigned NumVars;

protected:
  /// \brief Fetches list of variables associated with this clause.
  ///
  /// The variable list is stored in trailing storage placed immediately after
  /// the derived clause object T, aligned for Expr*; hence the pointer
  /// arithmetic below.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        reinterpret_cast<Expr **>(
            reinterpret_cast<char *>(this) +
            llvm37::RoundUpToAlignment(sizeof(T), llvm37::alignOf<Expr *>())),
        NumVars);
  }

  /// \brief Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(
        VL.begin(), VL.end(),
        reinterpret_cast<Expr **>(
            reinterpret_cast<char *>(this) +
            llvm37::RoundUpToAlignment(sizeof(T), llvm37::alignOf<Expr *>())));
  }

  /// \brief Build a clause with \a N variables.
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
/// OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} public: typedef MutableArrayRef<Expr *>::iterator varlist_iterator; typedef ArrayRef<const Expr *>::iterator varlist_const_iterator; typedef llvm37::iterator_range<varlist_iterator> varlist_range; typedef llvm37::iterator_range<varlist_const_iterator> varlist_const_range; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm37::makeArrayRef( reinterpret_cast<const Expr *const *>( reinterpret_cast<const char *>(this) + llvm37::RoundUpToAlignment(sizeof(T), llvm37::alignOf<const Expr *>())), NumVars); } }; /// \brief This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' /// clause with condition 'a > 5'. /// class OMPIfClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. Stmt *Condition; /// \brief Set condition. 
/// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'if' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. /// OMPIfClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_if; } StmtRange children() { return StmtRange(&Condition, &Condition + 1); } }; /// \brief This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. /// class OMPFinalClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. Stmt *Condition; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'final' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. 
/// OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_final; } StmtRange children() { return StmtRange(&Condition, &Condition + 1); } }; /// \brief This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. /// class OMPNumThreadsClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'num_threads' clause. Stmt *NumThreads; /// \brief Set condition. /// void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// \brief Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_threads, StartLoc, EndLoc), LParenLoc(LParenLoc), NumThreads(NumThreads) {} /// \brief Build an empty clause. 
/// OMPNumThreadsClause() : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumThreads(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_threads; } StmtRange children() { return StmtRange(&NumThreads, &NumThreads + 1); } }; /// \brief This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. /// class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *Safelen; /// \brief Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// \brief Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// \brief Build an empty clause. /// explicit OMPSafelenClause() : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Safelen(nullptr) {} /// \brief Sets the location of '('. 
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }

  StmtRange children() { return StmtRange(&Safelen, &Safelen + 1); }
};

/// \brief This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
///
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of for-loops.
  Stmt *NumForLoops;

  /// \brief Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// \brief Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumForLoops(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }

  StmtRange children() { return StmtRange(&NumForLoops, &NumForLoops + 1); }
};

/// \brief This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
///
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'default' clause.
  OpenMPDefaultClauseKind Kind;
  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// \brief Set kind of the clauses.
  ///
  /// \param K Argument of clause.
  ///
  void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }

  /// \brief Set argument location.
  ///
  /// \param KLoc Argument location.
  ///
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// \brief Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// \brief Build an empty clause.
  ///
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown),
        KindKwLoc(SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }

  /// \brief Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }

  StmtRange children() { return StmtRange(); }
};

/// \brief This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
///
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'proc_bind' clause.
  OpenMPProcBindClauseKind Kind;
  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// \brief Set kind of the clause.
  ///
  /// \param K Kind of clause.
  ///
  void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }

  /// \brief Set clause kind location.
  ///
  /// \param KLoc Kind location.
  ///
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// \brief Build an empty clause.
  ///
  OMPProcBindClause()
      : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown),
        KindKwLoc(SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Returns kind of the clause.
  OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }

  /// \brief Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_proc_bind;
  }

  StmtRange children() { return StmtRange(); }
};

/// \brief This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
///
class OMPScheduleClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind;
  /// \brief Start location of the schedule kind in source code.
  SourceLocation KindLoc;
  /// \brief Location of ',' (if any).
  SourceLocation CommaLoc;
  /// \brief Chunk size and a reference to pseudo variable for combined
  /// directives.
  enum { CHUNK_SIZE, HELPER_CHUNK_SIZE, NUM_EXPRS };
  Stmt *ChunkSizes[NUM_EXPRS];

  /// \brief Set schedule kind.
  ///
  /// \param K Schedule kind.
  ///
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
  /// \brief Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  ///
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  ///
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
  /// \brief Set location of ','.
  ///
  /// \param Loc Location of ','.
/// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSizes[CHUNK_SIZE] = E; } /// \brief Set helper chunk size. /// /// \param E Helper chunk size. /// void setHelperChunkSize(Expr *E) { ChunkSizes[HELPER_CHUNK_SIZE] = E; } public: /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Expr *HelperChunkSize) : OMPClause(OMPC_schedule, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc) { ChunkSizes[CHUNK_SIZE] = ChunkSize; ChunkSizes[HELPER_CHUNK_SIZE] = HelperChunkSize; } /// \brief Build an empty clause. /// explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), Kind(OMPC_SCHEDULE_unknown) { ChunkSizes[CHUNK_SIZE] = nullptr; ChunkSizes[HELPER_CHUNK_SIZE] = nullptr; } /// \brief Get kind of the clause. /// OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getScheduleKindLoc() { return KindLoc; } /// \brief Get location of ','. /// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return dyn_cast_or_null<Expr>(ChunkSizes[CHUNK_SIZE]); } /// \brief Get chunk size. 
/// Expr *getChunkSize() const { return dyn_cast_or_null<Expr>(ChunkSizes[CHUNK_SIZE]); } /// \brief Get helper chunk size. /// Expr *getHelperChunkSize() { return dyn_cast_or_null<Expr>(ChunkSizes[HELPER_CHUNK_SIZE]); } /// \brief Get helper chunk size. /// Expr *getHelperChunkSize() const { return dyn_cast_or_null<Expr>(ChunkSizes[HELPER_CHUNK_SIZE]); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } StmtRange children() { return StmtRange(&ChunkSizes[CHUNK_SIZE], &ChunkSizes[CHUNK_SIZE] + 1); } }; /// \brief This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause. /// class OMPOrderedClause : public OMPClause { public: /// \brief Build 'ordered' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPOrderedClause() : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'nowait' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. /// class OMPNowaitClause : public OMPClause { public: /// \brief Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. /// class OMPUntiedClause : public OMPClause { public: /// \brief Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. /// class OMPMergeableClause : public OMPClause { public: /// \brief Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. 
/// class OMPReadClause : public OMPClause { public: /// \brief Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. /// class OMPWriteClause : public OMPClause { public: /// \brief Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// class OMPUpdateClause : public OMPClause { public: /// \brief Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. /// class OMPCaptureClause : public OMPClause { public: /// \brief Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. /// class OMPSeqCstClause : public OMPClause { public: /// \brief Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } StmtRange children() { return StmtRange(); } }; /// \brief This represents clause 'private' in the '#pragma omp ...' directives. 
/// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. /// class OMPPrivateClause : public OMPVarListClause<OMPPrivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm37::makeArrayRef(varlist_end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. 
/// static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm37::iterator_range<private_copies_iterator> private_copies_range; typedef llvm37::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_private; } }; /// \brief This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. /// class OMPFirstprivateClause : public OMPVarListClause<OMPFirstprivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
/// OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm37::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// \brief Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm37::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. 
/// static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm37::iterator_range<private_copies_iterator> private_copies_range; typedef llvm37::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm37::iterator_range<inits_iterator> inits_range; typedef llvm37::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// \brief This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. 
class OMPLastprivateClause : public OMPVarListClause<OMPLastprivateClause> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  //
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// \brief Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm37::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm37::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm37::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm37::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  ///
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm37::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm37::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  /// \brief Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }
  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  // Children are the original variable references only; helper arrays are not
  // part of the range.
  StmtRange children() {
    return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};

/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
/// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. /// class OMPSharedClause : public OMPVarListClause<OMPSharedClause> { /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// \brief This represents clause 'reduction' in the '#pragma omp ...' /// directives. 
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
///
class OMPReductionClause : public OMPVarListClause<OMPReductionClause> {
  friend class OMPClauseReader;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;
  /// \brief Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;
  /// \brief Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  ///
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(), SourceLocation(),
                                             N),
        ColonLoc(), QualifierLoc(), NameInfo() {}

  /// \brief Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
  /// \brief Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
  /// \brief Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// \brief Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm37::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// \brief Get the list of helper RHS expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm37::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// \brief Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// \brief Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm37::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CutomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  ///
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> LHSExprs,
         ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps);
  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// \brief Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
  /// \brief Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm37::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm37::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  // Children are the original variable references only; helper arrays are not
  // part of the range.
  StmtRange children() {
    return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};

/// \brief This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
///
class OMPLinearClause : public OMPVarListClause<OMPLinearClause> {
  friend class OMPClauseReader;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the linear step for clause.
  /// The step lives in the first of the 2 slots right after the Finals array
  /// (see the layout comment on getInits below).
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// \brief Sets the expression to calculate linear step for clause.
  /// Stored in the second slot after the Finals array.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// \brief Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
                                          EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  ///
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          NumVars),
        ColonLoc(SourceLocation()) {}

  /// \brief Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed by
  /// NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Inits[]; Updates[]; Finals[];
  ///   Step; CalcStep; }
  ///
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm37::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Gets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm37::makeArrayRef(getInits().end(), varlist_size());
  }

  /// \brief Gets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm37::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// \brief Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// \brief Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  static OMPLinearClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation ColonLoc, SourceLocation EndLoc,
                                 ArrayRef<Expr *> VL, ArrayRef<Expr *> IL,
                                 Expr *Step, Expr *CalcStep);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  ///
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// \brief Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
  /// \brief Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }
  /// \brief Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }
  /// \brief Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }
  /// \brief Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// \brief Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// \brief Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  typedef MutableArrayRef<Expr *>::iterator inits_iterator;
  typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
  typedef llvm37::iterator_range<inits_iterator> inits_range;
  typedef llvm37::iterator_range<inits_const_iterator> inits_const_range;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  typedef MutableArrayRef<Expr *>::iterator updates_iterator;
  typedef ArrayRef<const Expr *>::iterator updates_const_iterator;
  typedef llvm37::iterator_range<updates_iterator> updates_range;
  typedef llvm37::iterator_range<updates_const_iterator> updates_const_range;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  typedef MutableArrayRef<Expr *>::iterator finals_iterator;
  typedef ArrayRef<const Expr *>::iterator finals_const_iterator;
  typedef llvm37::iterator_range<finals_iterator> finals_range;
  typedef llvm37::iterator_range<finals_const_iterator> finals_const_range;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  // Children are the original variable references only.
  StmtRange children() {
    return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_linear;
  }
};

/// \brief This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
///
class OMPAlignedClause : public OMPVarListClause<OMPAlignedClause> {
  friend class OMPClauseReader;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the alignment for clause.
  /// The alignment expression is stored in the single slot right after the
  /// variable list.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// \brief Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  ///
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           NumVars),
        ColonLoc(SourceLocation()) {}

public:
  /// \brief Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. /// static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// \brief Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// \brief Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// \brief Returns alignment. const Expr *getAlignment() const { return *varlist_end(); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_aligned; } }; /// \brief This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. /// class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. 
friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPCopyinClause(unsigned N) : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyin clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm37::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyin clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm37::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. 
These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm37::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of propagation of master's thread values of /// threadprivate variables to local instances of that variables in other /// implicit threads. /// static OMPCopyinClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm37::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm37::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyin; } }; /// \brief This represents clause 'copyprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp single copyprivate(a,b) /// \endcode /// In this example directive '#pragma omp single' has clause 'copyprivate' /// with the variables 'a' and 'b'. /// class OMPCopyprivateClause : public OMPVarListClause<OMPCopyprivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
/// OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPCopyprivateClause(unsigned N) : OMPVarListClause<OMPCopyprivateClause>( OMPC_copyprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm37::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm37::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. 
MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm37::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// copyprivate clause. /// static OMPCopyprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm37::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm37::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyprivate; } }; /// \brief This represents implicit clause 'flush' for the '#pragma omp flush' /// directive. /// This clause does not exist by itself, it can be only as a part of 'omp /// flush' directive. This clause is introduced to keep the original structure /// of \a OMPExecutableDirective class and its derivatives and to use the /// existing infrastructure of clauses with the list of variables. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has implicit clause 'flush' /// with the variables 'a' and 'b'. 
/// class OMPFlushClause : public OMPVarListClause<OMPFlushClause> { /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPFlushClause(unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N); StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_flush; } }; /// \brief This represents implicit clause 'depend' for the '#pragma omp task' /// directive. /// /// \code /// #pragma omp task depend(in:a,b) /// \endcode /// In this example directive '#pragma omp task' with clause 'depend' with the /// variables 'a' and 'b' with dependency 'in'. 
/// class OMPDependClause : public OMPVarListClause<OMPDependClause> { friend class OMPClauseReader; /// \brief Dependency type (one of in, out, inout). OpenMPDependClauseKind DepKind; /// \brief Dependency type location. SourceLocation DepLoc; /// \brief Colon location. SourceLocation ColonLoc; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc, EndLoc, N), DepKind(OMPC_DEPEND_unknown) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPDependClause(unsigned N) : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(), SourceLocation(), SourceLocation(), N), DepKind(OMPC_DEPEND_unknown) {} /// \brief Set dependency kind. void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; } /// \brief Set dependency kind and its location. void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; } /// \brief Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param DepKind Dependency type. /// \param DepLoc Location of the dependency type. /// \param ColonLoc Colon location. /// \param VL List of references to the variables. 
/// static OMPDependClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N); /// \brief Get dependency type. OpenMPDependClauseKind getDependencyKind() const { return DepKind; } /// \brief Get dependency type location. SourceLocation getDependencyLoc() const { return DepLoc; } /// \brief Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_depend; } }; } // end namespace clang #endif
9915.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose

/* 2-D FDTD (finite-difference time-domain) stencil, 8x8 tiled by CHILL.
 *
 * ex, ey : electric field components; hz : magnetic field;
 * _fict_ : source waveform applied along row 0 of ey at each time step.
 *
 * Fix: the tile-interior counters (t6, t8, t10) were function-scope
 * variables, which made them *shared* under "#pragma omp parallel for" and
 * caused a data race when compiled with OpenMP enabled.  Declaring each
 * counter inside its own for-statement makes it private per thread (OpenMP
 * data-sharing rules); serial results are bit-identical to before.
 */
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0],
                    double ey[1000 + 0][1200 + 0],
                    double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
  for (int t2 = 0; t2 <= tmax - 1; t2 += 1) {
    /* Drive the boundary row of ey with the source term for this step. */
    for (int t4 = 0; t4 <= ny - 1; t4 += 1)
      ey[0][t4] = _fict_[t2];

    /* ey update: rows 1..nx-1, all columns, 8x8 tiles. */
#pragma omp parallel for
    for (int t4 = 1; t4 <= nx - 1; t4 += 8)
      for (int t6 = t4; t6 <= (t4 + 7 < nx - 1 ? t4 + 7 : nx - 1); t6 += 1)
        for (int t8 = 0; t8 <= ny - 1; t8 += 8)
          for (int t10 = t8; t10 <= (ny - 1 < t8 + 7 ? ny - 1 : t8 + 7);
               t10 += 1)
            ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]);

    /* ex update: all rows, columns 1..ny-1, 8x8 tiles. */
#pragma omp parallel for
    for (int t4 = 0; t4 <= nx - 1; t4 += 8)
      for (int t6 = t4; t6 <= (t4 + 7 < nx - 1 ? t4 + 7 : nx - 1); t6 += 1)
        for (int t8 = 1; t8 <= ny - 1; t8 += 8)
          for (int t10 = t8; t10 <= (ny - 1 < t8 + 7 ? ny - 1 : t8 + 7);
               t10 += 1)
            ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]);

    /* hz update over the (nx-1) x (ny-1) interior.  The literal
     * 0.69999999999999996 is the nearest double to 0.7 (kept verbatim so
     * results stay bit-identical to the generated code). */
#pragma omp parallel for
    for (int t4 = 0; t4 <= nx - 2; t4 += 8)
      for (int t6 = t4; t6 <= (t4 + 7 < nx - 2 ? t4 + 7 : nx - 2); t6 += 1)
        for (int t8 = 0; t8 <= ny - 2; t8 += 8)
          for (int t10 = t8; t10 <= (ny - 2 < t8 + 7 ? ny - 2 : t8 + 7);
               t10 += 1)
            hz[t6][t10] =
                hz[t6][t10] -
                0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] +
                                       ey[t6 + 1][t10] - ey[t6][t10]);
  }
}
GB_unop__creal_fp32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__creal_fp32_fc32)
// op(A') function:  GB (_unop_tran__creal_fp32_fc32)

// C type:   float
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = (aij)
// unaryop:  cij = crealf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: crealf extracts the real part of a single-precision
// complex value (C99 <complex.h>)
#define GB_OP(z, x) \
    z = crealf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = (aij) ; \
    Cx [pC] = crealf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CREAL || GxB_NO_FP32 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__creal_fp32_fc32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/full case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        // apply the operator to each entry, in parallel
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = crealf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are absent and skipped)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = crealf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__creal_fp32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // the transpose kernel body is shared: it is textually included and
    // specialized by the GB_* macros defined above
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
scale.c
/*
 * OpenMP implementation of scaling a 2D array. This simple code is used to
 * illustrate benefits of multi-threaded parallelism and limits on performance
 * scalability (i.e., Amdahl's Law)
 *
 * @author Apan Qasem
 */
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include <omp.h>

/* Number of repetitions of the timed scaling loop. */
#define REPS 100

double t0;   /* elapsed time of the timed region, in milliseconds */

/* Return current wall-clock time in seconds (microsecond resolution). */
double mysecond() {
  struct timeval tp;
  /* The obsolete timezone argument may be NULL per POSIX; the return
     value cannot indicate failure for a valid pointer on Linux. */
  gettimeofday(&tp, NULL);
  return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

int main(int argc, char *argv[]) {

  /* Validate arguments before use (previously segfaulted when missing). */
  if (argc < 3) {
    fprintf(stderr, "usage: %s <matrix_dim> <num_threads>\n", argv[0]);
    return 1;
  }
  int M = atoi(argv[1]);   /* matrix dimension: arrays are M x M */
  int N = atoi(argv[2]);   /* number of OpenMP threads to request */
  if (M <= 0 || N <= 0) {
    fprintf(stderr, "matrix dimension and thread count must be positive\n");
    return 1;
  }

  omp_set_num_threads(N);

  int **a = malloc(sizeof(int *) * M);
  int **b = malloc(sizeof(int *) * M);
  if (!a || !b) {
    fprintf(stderr, "out of memory\n");
    return 1;
  }
  for (int i = 0; i < M; i++) {
    a[i] = malloc(sizeof(int) * M);
    b[i] = malloc(sizeof(int) * M);
    if (!a[i] || !b[i]) {
      fprintf(stderr, "out of memory\n");
      return 1;
    }
  }

  /* Initialize the source matrix (column-major walk, as in the original). */
  for (int j = 0; j < M; j++)
    for (int i = 0; i < M; i++)
      b[i][j] = i + j;

  t0 = mysecond();
  /* Loop indices are declared inside the loops, so each thread gets its own
     private copies automatically (no private() clause needed). */
#pragma omp parallel for
  for (int k = 0; k < REPS; k++) {
    for (int j = 0; j < M; j++)
      for (int i = 0; i < M; i++)
        a[i][j] = b[i][j] * 17;
  }
  t0 = (mysecond() - t0) * 1.e3;

  printf("parallel loop = %3.2f ms\n", t0);

  /* Release all rows and the row-pointer arrays (previously leaked). */
  for (int i = 0; i < M; i++) {
    free(a[i]);
    free(b[i]);
  }
  free(a);
  free(b);
  return 0;
}
parser.h
/* Data structures and function exported by the C++ Parser. Copyright (C) 2010-2017 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_CP_PARSER_H #define GCC_CP_PARSER_H #include "tree.h" #include "cp/cp-tree.h" #include "c-family/c-pragma.h" /* A token's value and its associated deferred access checks and qualifying scope. */ struct GTY(()) tree_check { /* The value associated with the token. */ tree value; /* The checks that have been associated with value. */ vec<deferred_access_check, va_gc> *checks; /* The token's qualifying scope (used when it is a CPP_NESTED_NAME_SPECIFIER). */ tree qualifying_scope; }; /* A C++ token. */ struct GTY (()) cp_token { /* The kind of token. */ ENUM_BITFIELD (cpp_ttype) type : 8; /* If this token is a keyword, this value indicates which keyword. Otherwise, this value is RID_MAX. */ ENUM_BITFIELD (rid) keyword : 8; /* Token flags. */ unsigned char flags; /* True if this token is from a context where it is implicitly extern "C" */ BOOL_BITFIELD implicit_extern_c : 1; /* True if an error has already been reported for this token, such as a CPP_NAME token that is not a keyword (i.e., for which KEYWORD is RID_MAX) iff this name was looked up and found to be ambiguous. */ BOOL_BITFIELD error_reported : 1; /* True for a token that has been purged. If a token is purged, it is no longer a valid token and it should be considered deleted. 
*/ BOOL_BITFIELD purged_p : 1; /* 5 unused bits. */ /* The location at which this token was found. */ location_t location; /* The value associated with this token, if any. */ union cp_token_value { /* Used for compound tokens such as CPP_NESTED_NAME_SPECIFIER. */ struct tree_check* GTY((tag ("1"))) tree_check_value; /* Use for all other tokens. */ tree GTY((tag ("0"))) value; } GTY((desc ("(%1.type == CPP_TEMPLATE_ID)" "|| (%1.type == CPP_NESTED_NAME_SPECIFIER)" "|| (%1.type == CPP_DECLTYPE)"))) u; }; /* We use a stack of token pointer for saving token sets. */ typedef struct cp_token *cp_token_position; /* The cp_lexer structure represents the C++ lexer. It is responsible for managing the token stream from the preprocessor and supplying it to the parser. Tokens are never added to the cp_lexer after it is created. */ struct GTY (()) cp_lexer { /* The memory allocated for the buffer. NULL if this lexer does not own the token buffer. */ vec<cp_token, va_gc> *buffer; /* A pointer just past the last available token. The tokens in this lexer are [buffer, last_token). */ cp_token_position GTY ((skip)) last_token; /* The next available token. If NEXT_TOKEN is &eof_token, then there are no more available tokens. */ cp_token_position GTY ((skip)) next_token; /* A stack indicating positions at which cp_lexer_save_tokens was called. The top entry is the most recent position at which we began saving tokens. If the stack is non-empty, we are saving tokens. */ vec<cp_token_position> GTY ((skip)) saved_tokens; /* The next lexer in a linked list of lexers. */ struct cp_lexer *next; /* True if we should output debugging information. */ bool debugging_p; /* True if we're in the context of parsing a pragma, and should not increment past the end-of-line marker. */ bool in_pragma; }; /* cp_token_cache is a range of tokens. There is no need to represent allocate heap memory for it, since tokens are never removed from the lexer's array. 
   There is also no need
   for the GC to walk through a cp_token_cache, since everything in here is
   referenced through a lexer.  */

struct GTY(()) cp_token_cache {
  /* The beginning of the token range.  */
  cp_token * GTY((skip)) first;

  /* Points immediately after the last token in the range.  */
  cp_token * GTY ((skip)) last;
};

typedef cp_token_cache *cp_token_cache_ptr;

struct cp_token_ident
{
  /* The identifier itself, and the text immediately before/after it.  */
  unsigned int ident_len;
  const char *ident_str;
  unsigned int before_len;
  const char *before_str;
  unsigned int after_len;
  const char *after_str;
};

/* An entry in a queue of function arguments that require post-processing.  */

struct GTY(()) cp_default_arg_entry {
  /* The current_class_type when we parsed this arg.  */
  tree class_type;

  /* The function decl itself.  */
  tree decl;
};


/* An entry in a stack for member functions defined within their classes.  */

struct GTY(()) cp_unparsed_functions_entry {
  /* Functions with default arguments that require post-processing.
     Functions appear in this list in declaration order.  */
  vec<cp_default_arg_entry, va_gc> *funs_with_default_args;

  /* Functions with definitions that require post-processing.  Functions
     appear in this list in declaration order.  */
  vec<tree, va_gc> *funs_with_definitions;

  /* Non-static data members with initializers that require post-processing.
     FIELD_DECLs appear in this list in declaration order.  */
  vec<tree, va_gc> *nsdmis;

  /* Nested classes go in this vector, so that we can do some final
     processing after parsing any NSDMIs.  */
  vec<tree, va_gc> *classes;
};

/* The status of a tentative parse.  */

enum cp_parser_status_kind
{
  /* No errors have occurred.  */
  CP_PARSER_STATUS_KIND_NO_ERROR,
  /* An error has occurred.  */
  CP_PARSER_STATUS_KIND_ERROR,
  /* We are committed to this tentative parse, whether or not an error
     has occurred.  */
  CP_PARSER_STATUS_KIND_COMMITTED
};

/* Context that is saved and restored when parsing tentatively.
*/ struct GTY (()) cp_parser_context { /* If this is a tentative parsing context, the status of the tentative parse. */ enum cp_parser_status_kind status; /* If non-NULL, we have just seen a `x->' or `x.' expression. Names that are looked up in this context must be looked up both in the scope given by OBJECT_TYPE (the type of `x' or `*x') and also in the context of the containing expression. */ tree object_type; /* The next parsing context in the stack. */ struct cp_parser_context *next; }; /* Helper data structure for parsing #pragma omp declare simd, and Cilk Plus SIMD-enabled functions' vector attribute. */ struct cp_omp_declare_simd_data { bool error_seen; /* Set if error has been reported. */ bool fndecl_seen; /* Set if one fn decl/definition has been seen already. */ vec<cp_token_cache_ptr> tokens; tree clauses; }; /* Helper data structure for parsing #pragma acc routine. */ struct cp_oacc_routine_data : cp_omp_declare_simd_data { location_t loc; }; /* The cp_parser structure represents the C++ parser. */ struct GTY(()) cp_parser { /* The lexer from which we are obtaining tokens. */ cp_lexer *lexer; /* The scope in which names should be looked up. If NULL_TREE, then we look up names in the scope that is currently open in the source program. If non-NULL, this is either a TYPE or NAMESPACE_DECL for the scope in which we should look. It can also be ERROR_MARK, when we've parsed a bogus scope. This value is not cleared automatically after a name is looked up, so we must be careful to clear it before starting a new look up sequence. (If it is not cleared, then `X::Y' followed by `Z' will look up `Z' in the scope of `X', rather than the current scope.) Unfortunately, it is difficult to tell when name lookup is complete, because we sometimes peek at a token, look it up, and then decide not to consume it. */ tree scope; /* OBJECT_SCOPE and QUALIFYING_SCOPE give the scopes in which the last lookup took place. 
OBJECT_SCOPE is used if an expression like "x->y" or "x.y" was used; it gives the type of "*x" or "x", respectively. QUALIFYING_SCOPE is used for an expression of the form "X::Y"; it refers to X. */ tree object_scope; tree qualifying_scope; /* A stack of parsing contexts. All but the bottom entry on the stack will be tentative contexts. We parse tentatively in order to determine which construct is in use in some situations. For example, in order to determine whether a statement is an expression-statement or a declaration-statement we parse it tentatively as a declaration-statement. If that fails, we then reparse the same token stream as an expression-statement. */ cp_parser_context *context; /* True if we are parsing GNU C++. If this flag is not set, then GNU extensions are not recognized. */ bool allow_gnu_extensions_p; /* TRUE if the `>' token should be interpreted as the greater-than operator. FALSE if it is the end of a template-id or template-parameter-list. In C++0x mode, this flag also applies to `>>' tokens, which are viewed as two consecutive `>' tokens when this flag is FALSE. */ bool greater_than_is_operator_p; /* TRUE if default arguments are allowed within a parameter list that starts at this point. FALSE if only a gnu extension makes them permissible. */ bool default_arg_ok_p; /* TRUE if we are parsing an integral constant-expression. See [expr.const] for a precise definition. */ bool integral_constant_expression_p; /* TRUE if we are parsing an integral constant-expression -- but a non-constant expression should be permitted as well. This flag is used when parsing an array bound so that GNU variable-length arrays are tolerated. */ bool allow_non_integral_constant_expression_p; /* TRUE if ALLOW_NON_CONSTANT_EXPRESSION_P is TRUE and something has been seen that makes the expression non-constant. */ bool non_integral_constant_expression_p; /* TRUE if local variable names and `this' are forbidden in the current context. 
*/ bool local_variables_forbidden_p; /* TRUE if the declaration we are parsing is part of a linkage-specification of the form `extern string-literal declaration'. */ bool in_unbraced_linkage_specification_p; /* TRUE if we are presently parsing a declarator, after the direct-declarator. */ bool in_declarator_p; /* TRUE if we are presently parsing a template-argument-list. */ bool in_template_argument_list_p; /* Set to IN_ITERATION_STMT if parsing an iteration-statement, to IN_OMP_BLOCK if parsing OpenMP structured block and IN_OMP_FOR if parsing OpenMP loop. If parsing a switch statement, this is bitwise ORed with IN_SWITCH_STMT, unless parsing an iteration-statement, OpenMP block or loop within that switch. */ #define IN_SWITCH_STMT 1 #define IN_ITERATION_STMT 2 #define IN_OMP_BLOCK 4 #define IN_OMP_FOR 8 #define IN_IF_STMT 16 #define IN_CILK_SIMD_FOR 32 #define IN_CILK_SPAWN 64 unsigned char in_statement; /* TRUE if we are presently parsing the body of a switch statement. Note that this doesn't quite overlap with in_statement above. The difference relates to giving the right sets of error messages: "case not in switch" vs "break statement used with OpenMP...". */ bool in_switch_statement_p; /* TRUE if we are parsing a type-id in an expression context. In such a situation, both "type (expr)" and "type (type)" are valid alternatives. */ bool in_type_id_in_expr_p; /* TRUE if we are currently in a header file where declarations are implicitly extern "C". */ bool implicit_extern_c; /* TRUE if strings in expressions should be translated to the execution character set. */ bool translate_strings_p; /* TRUE if we are presently parsing the body of a function, but not a local class. */ bool in_function_body; /* Nonzero if we're processing a __transaction_atomic or __transaction_relaxed statement. */ unsigned char in_transaction; /* TRUE if we can auto-correct a colon to a scope operator. */ bool colon_corrects_to_scope_p; /* TRUE if : doesn't start a class definition. 
Should be only used together with type_definition_forbidden_message non-NULL, in contexts where new types may not be defined, and the type list is terminated by colon. */ bool colon_doesnt_start_class_def_p; /* If non-NULL, then we are parsing a construct where new type definitions are not permitted. The string stored here will be issued as an error message if a type is defined. */ const char *type_definition_forbidden_message; /* A stack used for member functions of local classes. The lists contained in an individual entry can only be processed once the outermost class being defined is complete. */ vec<cp_unparsed_functions_entry, va_gc> *unparsed_queues; /* The number of classes whose definitions are currently in progress. */ unsigned num_classes_being_defined; /* The number of template parameter lists that apply directly to the current declaration. */ unsigned num_template_parameter_lists; /* When parsing #pragma omp declare simd, this is a pointer to a helper data structure. */ cp_omp_declare_simd_data * GTY((skip)) omp_declare_simd; /* When parsing Cilk Plus SIMD-enabled functions' vector attributes, this is a pointer to a helper data structure. */ cp_omp_declare_simd_data * GTY((skip)) cilk_simd_fn_info; /* When parsing #pragma acc routine, this is a pointer to a helper data structure. */ cp_oacc_routine_data * GTY((skip)) oacc_routine; /* Nonzero if parsing a parameter list where 'auto' should trigger an implicit template parameter. */ bool auto_is_implicit_function_template_parm_p; /* TRUE if the function being declared was made a template due to its parameter list containing generic type specifiers (`auto' or concept identifiers) rather than an explicit template parameter list. */ bool fully_implicit_function_template_p; /* Tracks the function's template parameter list when declaring a function using generic type parameters. 
This is either a new chain in the case of a fully implicit function template or an extension of the function's existing template parameter list. This is tracked to optimize calls subsequent calls to synthesize_implicit_template_parm during cp_parser_parameter_declaration. */ tree implicit_template_parms; /* The scope into which an implicit template parameter list has been introduced or an existing template parameter list is being extended with implicit template parameters. In most cases this is the sk_function_parms scope containing the use of a generic type. In the case of an out-of-line member definition using a generic type, it is the sk_class scope. */ cp_binding_level* implicit_template_scope; /* True if parsing a result type in a compound requirement. This permits constrained-type-specifiers inside what would normally be a trailing return type. */ bool in_result_type_constraint_p; /* True if a constrained-type-specifier is not allowed in this context e.g., because they could never be deduced. */ int prevent_constrained_type_specifiers; }; /* In parser.c */ extern void debug (cp_token &ref); extern void debug (cp_token *ptr); extern void cp_lexer_debug_tokens (vec<cp_token, va_gc> *); extern void debug (vec<cp_token, va_gc> &ref); extern void debug (vec<cp_token, va_gc> *ptr); extern void cp_debug_parser (FILE *, cp_parser *); extern void debug (cp_parser &ref); extern void debug (cp_parser *ptr); extern bool cp_keyword_starts_decl_specifier_p (enum rid keyword); #endif /* GCC_CP_PARSER_H */
GB_binop__ne_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__ne_uint32
// A.*B function (eWiseMult): GB_AemultB__ne_uint32
// A*D function (colscale): GB_AxD__ne_uint32
// D*A function (rowscale): GB_DxB__ne_uint32
// C+=B function (dense accum): GB_Cdense_accumB__ne_uint32
// C+=b function (dense accum): GB_Cdense_accumb__ne_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_uint32
// C=scalar+B GB_bind1st__ne_uint32
// C=scalar+B' GB_bind1st_tran__ne_uint32
// C=A+scalar GB_bind2nd__ne_uint32
// C=A'+scalar GB_bind2nd_tran__ne_uint32

// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij != bij)

// The GB_* macros below configure the generic template files that are
// textually #include'd inside the function bodies; they must stay in exact
// agreement with what those templates expect.

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) here: the NE operator has no CBLAS equivalent.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_UINT32 || GxB_NO_NE_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NE is none of these, so this variant is compiled out for this operator.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template is disabled (#if 0) for this operator: NE is not a
    // valid accumulator, so this function is a no-op that returns success
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__ne_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // disabled (#if 0) for this operator, same reason as GB_Cdense_accumB
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ne_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ne_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = Ax [pA] ;  \
    Cx [pC] = (x != aij) ;  \
}

GrB_Info GB_bind1st_tran__ne_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = Ax [pA] ;  \
    Cx [pC] = (aij != y) ;  \
}

GrB_Info GB_bind2nd_tran__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pi_reentrant.c
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * This program approximates pi by computing the area of a quarter circle and
 * comparing that to a square that circumscribes it.  It generates random x
 * and y coordinates and checks whether these are in the quarter circle or not.
 *
 * Usage: pi_reentrant [nr_tries [seed]]
 *   nr_tries  number of random points to draw (default 10000)
 *   seed      seed for the random number generator that generates the seeds
 *             for the re-entrant generators in each thread (default 1234)
 */
int main(int argc, char *argv[]) {
    int num_threads = 1;
    long nr_tries = 10000;
    if (argc > 1)
        nr_tries = atol(argv[1]);
    if (nr_tries <= 0)
        errx(1, "number of tries must be positive");
    unsigned int seed = 1234;
    if (argc > 2)
        seed = (unsigned int) atoi(argv[2]);
    long nr_successes = 0;
    unsigned int *seeds = NULL;
#pragma omp parallel default(none) shared(num_threads, seed, seeds, nr_tries, nr_successes)
    {
        int thread_num = 0;
#ifdef _OPENMP
        thread_num = omp_get_thread_num();
#endif
        /* a single thread determines the number of threads; the braces make
           the extent of the single construct explicit even when the #ifdef
           body is compiled out */
#pragma omp single
        {
#ifdef _OPENMP
            num_threads = omp_get_num_threads();
#endif
        }
        /* printing the number of threads can be done in parallel with
           allocating and initializing the seeds; the implicit barrier at the
           end of the sections construct makes seeds[] visible to all
           threads before it is read below */
#pragma omp sections
        {
#pragma omp section
            printf("running with %d threads\n", num_threads);
#pragma omp section
            {
                /* one thread allocates and initializes the seeds; the
                   non-reentrant rand() is only called here, by this single
                   thread */
                if (!(seeds = malloc(num_threads*sizeof *seeds)))
                    errx(1, "can not allocate seed array of length %d",
                         num_threads);
                srand(seed);
                for (int num = 0; num < num_threads; num++)
                    seeds[num] = (unsigned int) rand();
            }
        }
        printf("thread %d: seed %u\n", thread_num, seeds[thread_num]);
        /* all threads compute the number of successes; the loop counter is
           long so that nr_tries values above INT_MAX do not overflow it */
#pragma omp for reduction(+:nr_successes)
        for (long try_nr = 0; try_nr < nr_tries; try_nr++) {
            double x = ((double) rand_r(&seeds[thread_num]))/((double) RAND_MAX);
            double y = ((double) rand_r(&seeds[thread_num]))/((double) RAND_MAX);
            if (x*x + y*y < 1.0)
                nr_successes++;
        }
        /* a single thread deallocates the memory */
#pragma omp single
        free(seeds);
    }
    double pi = 4.0*((double) nr_successes)/((double) nr_tries);
    printf("pi = %lf\n", pi);
    return 0;
}
GB_unaryop__identity_bool_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__identity_bool_int16
// op(A') function: GB_tran__identity_bool_int16

// C type:   bool
// A type:   int16_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

// The GB_* macros below parameterize the generic template file included in
// GB_tran below; they must match what GB_unaryop_transpose.c expects.

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
// Expands to: read aij, cast it to bool, then store via the (identity) op.
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_bool_int16
(
    bool *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_bool_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the included template provides the whole function body, driven by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fdtd-2d.orio.par.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define tmax T #define nx N #define ny N double ex[nx][ny +1]; double ey[nx +1][ny]; double hz[nx][ny]; void init_arrays() { int i, j; for (i=0; i<nx+1; i++) { for (j=0; j<ny; j++) { ey[i][j] = 0; } } for (i=0; i<nx; i++) { for (j=0; j<ny+1; j++) { ex[i][j] = 0; } } for (j=0; j<ny; j++) { ey[0][j] = ((double)j)/ny; } for (i=0; i<nx; i++) { for (j=0; j<ny; j++) { hz[i][j] = 0; } } } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); /*@ begin PerfTuning ( def build { arg build_command = 'icc -O3 -openmp -lm'; } def performance_counter { arg repetitions = 1; } def performance_params { # [4,8,16,32,64,128]; # [1,4,8,16]; param T1_1[] = [16]; param T1_2[] = [32]; param T1_3[] = [32]; param T2_1[] = [1]; param T2_2[] = [1]; param T2_3[] = [1]; # constraint c1 = (T1_1*T2_1<=1024 and T1_2*T2_2<=1024 and T1_3*T2_3<=1024); # constraint c2 = (T1_1==T1_3 and T2_1==T2_3); param U1[] = [8]; param U2[] = [2]; param U3[] = [4]; constraint c3 = (U1*U2*U3<=256); param PERM[] = [ # [0,1,2], [0,2,1], # [1,0,2], # [1,2,0], # [2,0,1], # [2,1,0], ]; param PAR[] = [True]; param SCREP[] = [False]; param IVEC[] = [True]; } def search { arg algorithm = 'Exhaustive'; # arg algorithm = 'Simplex'; # arg total_runs = 1; # arg algorithm = 'Random'; # arg time_limit = 10; } def input_params { let N=2000; param tmax[] = [500]; param nx[] = [N]; param ny[] = [N]; } def input_vars { decl static double ex[nx][ny+1] = random; decl static double ey[nx+1][ny] = random; decl static double hz[nx][ny] = random; } ) @*/ /**-- (Generated by Orio) Best performance cost: 1.681530 Tuned for specific problem sizes: nx = 2000 ny = 2000 tmax = 500 Best performance parameters: IVEC = True 
PAR = True PERM = [0, 2, 1] SCREP = False T1_1 = 16 T1_2 = 32 T1_3 = 32 T2_1 = 1 T2_2 = 1 T2_3 = 1 U1 = 8 U2 = 2 U3 = 4 --**/ register int i,j,k,t; register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t; register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6, newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12; register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6, newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12; /*@ begin PolySyn( parallel = PAR; tiles = [T1_1,T1_2,T1_3,T2_1,T2_2,T2_3]; permut = PERM; unroll_factors = [U1,U2,U3]; scalar_replace = SCREP; vectorize = IVEC; profiling_code = 'fdtd-2d_profiling.c'; compile_cmd = 'gcc'; compile_opts = '-lm'; ) @*/#include <math.h> #include <assert.h> #include <omp.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) int c1, c2, c3, c4, c5, c6, c7, c8, c9; register int lb, ub, lb1, ub1, lb2, ub2; /* polysyn start */ for (c1=-1;c1<=floord(3*tmax+ny-3,32);c1++) { lb1=max(max(ceild(16*c1-tmax+1,16),0),ceild(16*c1-31,48)); ub1=min(min(floord(16*c1+15,16),floord(16*c1+ny+15,48)),floord(tmax+ny-1,32)); #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9) for (c2=lb1; c2<=ub1; c2++) { for (c3=max(max(max(max(max(max(max(max(max(max(ceild(-ny-29,32),ceild(-32*c2-61,32)),ceild(32*c2-ny-30,32)),0),ceild(32*c1-64*c2-61,32)),ceild(16*c1-16*c2-31,32)),ceild(512*c1-1504*c2-30*nx-931,32)),ceild(496*c1-1456*c2-30*nx-ny-899,32)),ceild(16*c1-16*c2-ny-29,32)),ceild(32*c1-992*c2-30*nx-ny-899,32)),ceild(32*c1-1024*c2-30*nx-931,32));c3<=min(min(min(floord(16*c1-16*c2+nx+15,32),floord(32*c1+960*c2+31*nx+930,32)),floord(32*c2+nx+30,32)),floord(tmax+nx-1,32));c3++) { if ((c1 <= floord(16*c2+32*c3-nx,16)) && (c2 <= floord(32*c3-nx+ny,32)) && (c3 >= ceild(nx,32))) { for 
(c8=max(32*c3-nx+1,32*c2);c8<=min(32*c3-nx+ny,32*c2+31);c8++) { hz[nx-1][-32*c3+c8+nx-1]=hz[nx-1][-32*c3+c8+nx-1]-((double)(7))/10*(ey[1+nx-1][-32*c3+c8+nx-1]+ex[nx-1][1+-32*c3+c8+nx-1]-ex[nx-1][-32*c3+c8+nx-1]-ey[nx-1][-32*c3+c8+nx-1]); } } if ((c1 <= floord(48*c2-ny,16)) && (c2 >= max(ceild(ny,32),ceild(32*c3-nx+ny+1,32))) && (nx >= 2)) { for (c9=max(32*c3,32*c2-ny+1);c9<=min(32*c3+31,32*c2+nx-ny);c9++) { hz[-32*c2+c9+ny-1][ny-1]=hz[-32*c2+c9+ny-1][ny-1]-((double)(7))/10*(ey[1+-32*c2+c9+ny-1][ny-1]+ex[-32*c2+c9+ny-1][1+ny-1]-ex[-32*c2+c9+ny-1][ny-1]-ey[-32*c2+c9+ny-1][ny-1]); } } if ((c1 <= floord(48*c2-ny,16)) && (c2 >= max(ceild(ny,32),ceild(32*c3+ny,32))) && (nx == 1) && (ny >= 2)) { hz[0][ny-1]=hz[0][ny-1]-((double)(7))/10*(ey[1+0][ny-1]+ex[0][1+ny-1]-ex[0][ny-1]-ey[0][ny-1]); } if ((c1 <= floord(48*c2-1,16)) && (c2 <= floord(c3+1,2)) && (c2 >= max(ceild(32*c3+1,32),1)) && (nx == 1) && (ny == 1)) { hz[0][0]=hz[0][0]-((double)(7))/10*(ey[1+0][0]+ex[0][1+0]-ex[0][0]-ey[0][0]); } if ((c1 == c2+c3) && (nx == 1)) { for (c7=max(max(32*c3,0),32*c2-ny+1);c7<=min(min(min(32*c2-1,tmax-1),16*c3+15),32*c2-ny+31);c7++) { for (c8=32*c2;c8<=c7+ny-1;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]); } hz[0][ny-1]=hz[0][ny-1]-((double)(7))/10*(ey[1+0][ny-1]+ex[0][1+ny-1]-ex[0][ny-1]-ey[0][ny-1]); } } if ((c1 == c2+c3) && (nx == 1) && (ny >= 2)) { for (c7=max(max(32*c2,32*c3),0);c7<=min(min(tmax-1,16*c3+15),32*c2-ny+31);c7++) { ey[0][0]=c7; for (c8=c7+1;c8<=c7+ny-1;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]); } hz[0][ny-1]=hz[0][ny-1]-((double)(7))/10*(ey[1+0][ny-1]+ex[0][1+ny-1]-ex[0][ny-1]-ey[0][ny-1]); } } if ((c1 == c2+c3) && (nx 
== 1)) { for (c7=max(max(32*c3,0),32*c2-ny+32);c7<=min(min(tmax-1,16*c3+15),32*c2-1);c7++) { for (c8=32*c2;c8<=32*c2+31;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]); } } } if ((c1 == c2+c3) && (nx == 1)) { for (c7=max(max(max(32*c2,32*c3),0),32*c2-ny+32);c7<=min(min(tmax-1,16*c3+15),32*c2+30);c7++) { ey[0][0]=c7; for (c8=c7+1;c8<=32*c2+31;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]); } } } if ((c1 == c2+c3) && (nx == 1) && (ny == 1)) { for (c7=max(max(32*c3,0),32*c2);c7<=min(min(tmax-1,16*c3+15),32*c2+30);c7++) { ey[0][0]=c7; hz[0][0]=hz[0][0]-((double)(7))/10*(ey[1+0][0]+ex[0][1+0]-ex[0][0]-ey[0][0]); } } for (c7=max(max(max(0,32*c3-nx+1),32*c2-ny+1),16*c1-16*c2);c7<=min(min(min(min(min(32*c2-1,32*c3-nx+31),32*c3-1),tmax-1),32*c2-ny+31),16*c1-16*c2+15);c7++) { for (c8=32*c2;c8<=c7+ny-1;c8++) { for (c9=32*c3;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } for (c9=32*c3;c9<=c7+nx;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } if (ny >= 2) { for 
(c7=max(max(max(32*c2,0),32*c3-nx+1),16*c1-16*c2);c7<=min(min(min(min(32*c3-nx+31,32*c3-1),tmax-1),32*c2-ny+31),16*c1-16*c2+15);c7++) { for (c9=32*c3;c9<=c7+nx-1;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=c7+ny-1;c8++) { for (c9=32*c3;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } for (c9=32*c3;c9<=c7+nx;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } for (c7=max(max(max(0,16*c1-16*c2),32*c3-nx+1),32*c2-ny+32);c7<=min(min(min(min(tmax-1,32*c3-1),16*c1-16*c2+15),32*c2-1),32*c3-nx+31);c7++) { for (c8=32*c2;c8<=32*c2+31;c8++) { for (c9=32*c3;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } } for (c7=max(max(max(32*c3-nx+32,0),32*c2-ny+1),16*c1-16*c2);c7<=min(min(min(min(32*c2-1,32*c3-1),tmax-1),32*c2-ny+31),16*c1-16*c2+15);c7++) { for (c8=32*c2;c8<=c7+ny-1;c8++) { for (c9=32*c3;c9<=32*c3+31;c9++) { 
ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } for (c9=32*c3;c9<=32*c3+31;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } for (c7=max(max(max(max(32*c2,0),16*c1-16*c2),32*c3-nx+1),32*c2-ny+32);c7<=min(min(min(min(tmax-1,32*c3-1),32*c2+30),16*c1-16*c2+15),32*c3-nx+31);c7++) { for (c9=32*c3;c9<=c7+nx-1;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=32*c2+31;c8++) { for (c9=32*c3;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } } if (ny >= 2) { for (c7=max(max(max(32*c2,32*c3-nx+32),0),16*c1-16*c2);c7<=min(min(min(32*c3-1,tmax-1),32*c2-ny+31),16*c1-16*c2+15);c7++) { for (c9=32*c3;c9<=32*c3+31;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=c7+ny-1;c8++) { for (c9=32*c3;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); 
hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } for (c9=32*c3;c9<=32*c3+31;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } /*@ begin Loop( transform Composite( permut = [['c7', 'c9', 'c8']], regtile = (['c7', 'c8', 'c9'],[8, 2, 4]), scalarreplace = (False, 'double'), vector = (True, ['ivdep','vector always'])) for (c7=max(max(max(0,16*c1-16*c2),32*c3-nx+32),32*c2-ny+32);c7<=min(min(min(tmax-1,32*c3-1),16*c1-16*c2+15),32*c2-1);c7++) { for (c8=32*c2;c8<=32*c2+31;c8++) { for (c9=32*c3;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } } ) @*/{ for (c7t=max(max(max(0,16*c1-16*c2),32*c3-nx+32),32*c2-ny+32); c7t<=min(min(min(tmax-1,32*c3-1),16*c1-16*c2+15),32*c2-1)-7; c7t=c7t+8) { for (c9t=32*c3; c9t<=32*c3+28; c9t=c9t+4) { register int cbv_1, cbv_2; cbv_1=32*c2; cbv_2=32*c2+30; #pragma ivdep #pragma vector always for (c8t=cbv_1; c8t<=cbv_2; c8t=c8t+2) { ey[-c7t+c9t][-c7t+c8t]=ey[-c7t+c9t][-c7t+c8t]-((double)(1))/2*(hz[-c7t+c9t][-c7t+c8t]-hz[-c7t+c9t-1][-c7t+c8t]); ey[-c7t+c9t][-c7t+(c8t+1)]=ey[-c7t+c9t][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+c9t][-c7t+(c8t+1)]-hz[-c7t+c9t-1][-c7t+(c8t+1)]); ey[-c7t+(c9t+1)][-c7t+c8t]=ey[-c7t+(c9t+1)][-c7t+c8t]-((double)(1))/2*(hz[-c7t+(c9t+1)][-c7t+c8t]-hz[-c7t+(c9t+1)-1][-c7t+c8t]); ey[-c7t+(c9t+1)][-c7t+(c8t+1)]=ey[-c7t+(c9t+1)][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+(c9t+1)][-c7t+(c8t+1)]-hz[-c7t+(c9t+1)-1][-c7t+(c8t+1)]); 
ey[-c7t+(c9t+2)][-c7t+c8t]=ey[-c7t+(c9t+2)][-c7t+c8t]-((double)(1))/2*(hz[-c7t+(c9t+2)][-c7t+c8t]-hz[-c7t+(c9t+2)-1][-c7t+c8t]); ey[-c7t+(c9t+2)][-c7t+(c8t+1)]=ey[-c7t+(c9t+2)][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+(c9t+2)][-c7t+(c8t+1)]-hz[-c7t+(c9t+2)-1][-c7t+(c8t+1)]); ey[-c7t+(c9t+3)][-c7t+c8t]=ey[-c7t+(c9t+3)][-c7t+c8t]-((double)(1))/2*(hz[-c7t+(c9t+3)][-c7t+c8t]-hz[-c7t+(c9t+3)-1][-c7t+c8t]); ey[-c7t+(c9t+3)][-c7t+(c8t+1)]=ey[-c7t+(c9t+3)][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+(c9t+3)][-c7t+(c8t+1)]-hz[-c7t+(c9t+3)-1][-c7t+(c8t+1)]); ey[-(c7t+1)+c9t][-(c7t+1)+c8t]=ey[-(c7t+1)+c9t][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+c9t][-(c7t+1)+c8t]-hz[-(c7t+1)+c9t-1][-(c7t+1)+c8t]); ey[-(c7t+1)+c9t][-(c7t+1)+(c8t+1)]=ey[-(c7t+1)+c9t][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+c9t][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+c9t-1][-(c7t+1)+(c8t+1)]); ey[-(c7t+1)+(c9t+1)][-(c7t+1)+c8t]=ey[-(c7t+1)+(c9t+1)][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+(c9t+1)][-(c7t+1)+c8t]-hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8t]); ey[-(c7t+1)+(c9t+1)][-(c7t+1)+(c8t+1)]=ey[-(c7t+1)+(c9t+1)][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+(c9t+1)][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+(c8t+1)]); ey[-(c7t+1)+(c9t+2)][-(c7t+1)+c8t]=ey[-(c7t+1)+(c9t+2)][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+(c9t+2)][-(c7t+1)+c8t]-hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8t]); ey[-(c7t+1)+(c9t+2)][-(c7t+1)+(c8t+1)]=ey[-(c7t+1)+(c9t+2)][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+(c9t+2)][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+(c8t+1)]); ey[-(c7t+1)+(c9t+3)][-(c7t+1)+c8t]=ey[-(c7t+1)+(c9t+3)][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+(c9t+3)][-(c7t+1)+c8t]-hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8t]); ey[-(c7t+1)+(c9t+3)][-(c7t+1)+(c8t+1)]=ey[-(c7t+1)+(c9t+3)][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+(c9t+3)][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+(c8t+1)]); 
ey[-(c7t+2)+c9t][-(c7t+2)+c8t]=ey[-(c7t+2)+c9t][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+c9t][-(c7t+2)+c8t]-hz[-(c7t+2)+c9t-1][-(c7t+2)+c8t]); ey[-(c7t+2)+c9t][-(c7t+2)+(c8t+1)]=ey[-(c7t+2)+c9t][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+c9t][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+c9t-1][-(c7t+2)+(c8t+1)]); ey[-(c7t+2)+(c9t+1)][-(c7t+2)+c8t]=ey[-(c7t+2)+(c9t+1)][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+(c9t+1)][-(c7t+2)+c8t]-hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8t]); ey[-(c7t+2)+(c9t+1)][-(c7t+2)+(c8t+1)]=ey[-(c7t+2)+(c9t+1)][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+(c9t+1)][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+(c8t+1)]); ey[-(c7t+2)+(c9t+2)][-(c7t+2)+c8t]=ey[-(c7t+2)+(c9t+2)][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+(c9t+2)][-(c7t+2)+c8t]-hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8t]); ey[-(c7t+2)+(c9t+2)][-(c7t+2)+(c8t+1)]=ey[-(c7t+2)+(c9t+2)][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+(c9t+2)][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+(c8t+1)]); ey[-(c7t+2)+(c9t+3)][-(c7t+2)+c8t]=ey[-(c7t+2)+(c9t+3)][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+(c9t+3)][-(c7t+2)+c8t]-hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8t]); ey[-(c7t+2)+(c9t+3)][-(c7t+2)+(c8t+1)]=ey[-(c7t+2)+(c9t+3)][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+(c9t+3)][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+(c8t+1)]); ey[-(c7t+3)+c9t][-(c7t+3)+c8t]=ey[-(c7t+3)+c9t][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+c9t][-(c7t+3)+c8t]-hz[-(c7t+3)+c9t-1][-(c7t+3)+c8t]); ey[-(c7t+3)+c9t][-(c7t+3)+(c8t+1)]=ey[-(c7t+3)+c9t][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+c9t][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+c9t-1][-(c7t+3)+(c8t+1)]); ey[-(c7t+3)+(c9t+1)][-(c7t+3)+c8t]=ey[-(c7t+3)+(c9t+1)][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+(c9t+1)][-(c7t+3)+c8t]-hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8t]); ey[-(c7t+3)+(c9t+1)][-(c7t+3)+(c8t+1)]=ey[-(c7t+3)+(c9t+1)][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+(c9t+1)][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+(c8t+1)]); 
ey[-(c7t+3)+(c9t+2)][-(c7t+3)+c8t]=ey[-(c7t+3)+(c9t+2)][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+(c9t+2)][-(c7t+3)+c8t]-hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8t]); ey[-(c7t+3)+(c9t+2)][-(c7t+3)+(c8t+1)]=ey[-(c7t+3)+(c9t+2)][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+(c9t+2)][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+(c8t+1)]); ey[-(c7t+3)+(c9t+3)][-(c7t+3)+c8t]=ey[-(c7t+3)+(c9t+3)][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+(c9t+3)][-(c7t+3)+c8t]-hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8t]); ey[-(c7t+3)+(c9t+3)][-(c7t+3)+(c8t+1)]=ey[-(c7t+3)+(c9t+3)][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+(c9t+3)][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+(c8t+1)]); ey[-(c7t+4)+c9t][-(c7t+4)+c8t]=ey[-(c7t+4)+c9t][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+c9t][-(c7t+4)+c8t]-hz[-(c7t+4)+c9t-1][-(c7t+4)+c8t]); ey[-(c7t+4)+c9t][-(c7t+4)+(c8t+1)]=ey[-(c7t+4)+c9t][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+c9t][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+c9t-1][-(c7t+4)+(c8t+1)]); ey[-(c7t+4)+(c9t+1)][-(c7t+4)+c8t]=ey[-(c7t+4)+(c9t+1)][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+(c9t+1)][-(c7t+4)+c8t]-hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8t]); ey[-(c7t+4)+(c9t+1)][-(c7t+4)+(c8t+1)]=ey[-(c7t+4)+(c9t+1)][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+(c9t+1)][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+(c8t+1)]); ey[-(c7t+4)+(c9t+2)][-(c7t+4)+c8t]=ey[-(c7t+4)+(c9t+2)][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+(c9t+2)][-(c7t+4)+c8t]-hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8t]); ey[-(c7t+4)+(c9t+2)][-(c7t+4)+(c8t+1)]=ey[-(c7t+4)+(c9t+2)][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+(c9t+2)][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+(c8t+1)]); ey[-(c7t+4)+(c9t+3)][-(c7t+4)+c8t]=ey[-(c7t+4)+(c9t+3)][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+(c9t+3)][-(c7t+4)+c8t]-hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8t]); ey[-(c7t+4)+(c9t+3)][-(c7t+4)+(c8t+1)]=ey[-(c7t+4)+(c9t+3)][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+(c9t+3)][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+(c8t+1)]); 
ey[-(c7t+5)+c9t][-(c7t+5)+c8t]=ey[-(c7t+5)+c9t][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+c9t][-(c7t+5)+c8t]-hz[-(c7t+5)+c9t-1][-(c7t+5)+c8t]); ey[-(c7t+5)+c9t][-(c7t+5)+(c8t+1)]=ey[-(c7t+5)+c9t][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+c9t][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+c9t-1][-(c7t+5)+(c8t+1)]); ey[-(c7t+5)+(c9t+1)][-(c7t+5)+c8t]=ey[-(c7t+5)+(c9t+1)][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+(c9t+1)][-(c7t+5)+c8t]-hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8t]); ey[-(c7t+5)+(c9t+1)][-(c7t+5)+(c8t+1)]=ey[-(c7t+5)+(c9t+1)][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+(c9t+1)][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+(c8t+1)]); ey[-(c7t+5)+(c9t+2)][-(c7t+5)+c8t]=ey[-(c7t+5)+(c9t+2)][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+(c9t+2)][-(c7t+5)+c8t]-hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8t]); ey[-(c7t+5)+(c9t+2)][-(c7t+5)+(c8t+1)]=ey[-(c7t+5)+(c9t+2)][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+(c9t+2)][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+(c8t+1)]); ey[-(c7t+5)+(c9t+3)][-(c7t+5)+c8t]=ey[-(c7t+5)+(c9t+3)][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+(c9t+3)][-(c7t+5)+c8t]-hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8t]); ey[-(c7t+5)+(c9t+3)][-(c7t+5)+(c8t+1)]=ey[-(c7t+5)+(c9t+3)][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+(c9t+3)][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+(c8t+1)]); ey[-(c7t+6)+c9t][-(c7t+6)+c8t]=ey[-(c7t+6)+c9t][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+c9t][-(c7t+6)+c8t]-hz[-(c7t+6)+c9t-1][-(c7t+6)+c8t]); ey[-(c7t+6)+c9t][-(c7t+6)+(c8t+1)]=ey[-(c7t+6)+c9t][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+c9t][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+c9t-1][-(c7t+6)+(c8t+1)]); ey[-(c7t+6)+(c9t+1)][-(c7t+6)+c8t]=ey[-(c7t+6)+(c9t+1)][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+(c9t+1)][-(c7t+6)+c8t]-hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8t]); ey[-(c7t+6)+(c9t+1)][-(c7t+6)+(c8t+1)]=ey[-(c7t+6)+(c9t+1)][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+(c9t+1)][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+(c8t+1)]); 
ey[-(c7t+6)+(c9t+2)][-(c7t+6)+c8t]=ey[-(c7t+6)+(c9t+2)][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+(c9t+2)][-(c7t+6)+c8t]-hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8t]); ey[-(c7t+6)+(c9t+2)][-(c7t+6)+(c8t+1)]=ey[-(c7t+6)+(c9t+2)][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+(c9t+2)][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+(c8t+1)]); ey[-(c7t+6)+(c9t+3)][-(c7t+6)+c8t]=ey[-(c7t+6)+(c9t+3)][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+(c9t+3)][-(c7t+6)+c8t]-hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8t]); ey[-(c7t+6)+(c9t+3)][-(c7t+6)+(c8t+1)]=ey[-(c7t+6)+(c9t+3)][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+(c9t+3)][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+(c8t+1)]); ey[-(c7t+7)+c9t][-(c7t+7)+c8t]=ey[-(c7t+7)+c9t][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+c9t][-(c7t+7)+c8t]-hz[-(c7t+7)+c9t-1][-(c7t+7)+c8t]); ey[-(c7t+7)+c9t][-(c7t+7)+(c8t+1)]=ey[-(c7t+7)+c9t][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+c9t][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+c9t-1][-(c7t+7)+(c8t+1)]); ey[-(c7t+7)+(c9t+1)][-(c7t+7)+c8t]=ey[-(c7t+7)+(c9t+1)][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+(c9t+1)][-(c7t+7)+c8t]-hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8t]); ey[-(c7t+7)+(c9t+1)][-(c7t+7)+(c8t+1)]=ey[-(c7t+7)+(c9t+1)][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+(c9t+1)][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+(c8t+1)]); ey[-(c7t+7)+(c9t+2)][-(c7t+7)+c8t]=ey[-(c7t+7)+(c9t+2)][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+(c9t+2)][-(c7t+7)+c8t]-hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8t]); ey[-(c7t+7)+(c9t+2)][-(c7t+7)+(c8t+1)]=ey[-(c7t+7)+(c9t+2)][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+(c9t+2)][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+(c8t+1)]); ey[-(c7t+7)+(c9t+3)][-(c7t+7)+c8t]=ey[-(c7t+7)+(c9t+3)][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+(c9t+3)][-(c7t+7)+c8t]-hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8t]); ey[-(c7t+7)+(c9t+3)][-(c7t+7)+(c8t+1)]=ey[-(c7t+7)+(c9t+3)][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+(c9t+3)][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+(c8t+1)]); 
ex[-c7t+c9t][-c7t+c8t]=ex[-c7t+c9t][-c7t+c8t]-((double)(1))/2*(hz[-c7t+c9t][-c7t+c8t]-hz[-c7t+c9t][-c7t+c8t-1]); ex[-c7t+c9t][-c7t+(c8t+1)]=ex[-c7t+c9t][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+c9t][-c7t+(c8t+1)]-hz[-c7t+c9t][-c7t+(c8t+1)-1]); ex[-c7t+(c9t+1)][-c7t+c8t]=ex[-c7t+(c9t+1)][-c7t+c8t]-((double)(1))/2*(hz[-c7t+(c9t+1)][-c7t+c8t]-hz[-c7t+(c9t+1)][-c7t+c8t-1]); ex[-c7t+(c9t+1)][-c7t+(c8t+1)]=ex[-c7t+(c9t+1)][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+(c9t+1)][-c7t+(c8t+1)]-hz[-c7t+(c9t+1)][-c7t+(c8t+1)-1]); ex[-c7t+(c9t+2)][-c7t+c8t]=ex[-c7t+(c9t+2)][-c7t+c8t]-((double)(1))/2*(hz[-c7t+(c9t+2)][-c7t+c8t]-hz[-c7t+(c9t+2)][-c7t+c8t-1]); ex[-c7t+(c9t+2)][-c7t+(c8t+1)]=ex[-c7t+(c9t+2)][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+(c9t+2)][-c7t+(c8t+1)]-hz[-c7t+(c9t+2)][-c7t+(c8t+1)-1]); ex[-c7t+(c9t+3)][-c7t+c8t]=ex[-c7t+(c9t+3)][-c7t+c8t]-((double)(1))/2*(hz[-c7t+(c9t+3)][-c7t+c8t]-hz[-c7t+(c9t+3)][-c7t+c8t-1]); ex[-c7t+(c9t+3)][-c7t+(c8t+1)]=ex[-c7t+(c9t+3)][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+(c9t+3)][-c7t+(c8t+1)]-hz[-c7t+(c9t+3)][-c7t+(c8t+1)-1]); ex[-(c7t+1)+c9t][-(c7t+1)+c8t]=ex[-(c7t+1)+c9t][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+c9t][-(c7t+1)+c8t]-hz[-(c7t+1)+c9t][-(c7t+1)+c8t-1]); ex[-(c7t+1)+c9t][-(c7t+1)+(c8t+1)]=ex[-(c7t+1)+c9t][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+c9t][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+c9t][-(c7t+1)+(c8t+1)-1]); ex[-(c7t+1)+(c9t+1)][-(c7t+1)+c8t]=ex[-(c7t+1)+(c9t+1)][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+(c9t+1)][-(c7t+1)+c8t]-hz[-(c7t+1)+(c9t+1)][-(c7t+1)+c8t-1]); ex[-(c7t+1)+(c9t+1)][-(c7t+1)+(c8t+1)]=ex[-(c7t+1)+(c9t+1)][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+(c9t+1)][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+(c9t+1)][-(c7t+1)+(c8t+1)-1]); ex[-(c7t+1)+(c9t+2)][-(c7t+1)+c8t]=ex[-(c7t+1)+(c9t+2)][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+(c9t+2)][-(c7t+1)+c8t]-hz[-(c7t+1)+(c9t+2)][-(c7t+1)+c8t-1]); 
ex[-(c7t+1)+(c9t+2)][-(c7t+1)+(c8t+1)]=ex[-(c7t+1)+(c9t+2)][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+(c9t+2)][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+(c9t+2)][-(c7t+1)+(c8t+1)-1]); ex[-(c7t+1)+(c9t+3)][-(c7t+1)+c8t]=ex[-(c7t+1)+(c9t+3)][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+(c9t+3)][-(c7t+1)+c8t]-hz[-(c7t+1)+(c9t+3)][-(c7t+1)+c8t-1]); ex[-(c7t+1)+(c9t+3)][-(c7t+1)+(c8t+1)]=ex[-(c7t+1)+(c9t+3)][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+(c9t+3)][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+(c9t+3)][-(c7t+1)+(c8t+1)-1]); ex[-(c7t+2)+c9t][-(c7t+2)+c8t]=ex[-(c7t+2)+c9t][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+c9t][-(c7t+2)+c8t]-hz[-(c7t+2)+c9t][-(c7t+2)+c8t-1]); ex[-(c7t+2)+c9t][-(c7t+2)+(c8t+1)]=ex[-(c7t+2)+c9t][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+c9t][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+c9t][-(c7t+2)+(c8t+1)-1]); ex[-(c7t+2)+(c9t+1)][-(c7t+2)+c8t]=ex[-(c7t+2)+(c9t+1)][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+(c9t+1)][-(c7t+2)+c8t]-hz[-(c7t+2)+(c9t+1)][-(c7t+2)+c8t-1]); ex[-(c7t+2)+(c9t+1)][-(c7t+2)+(c8t+1)]=ex[-(c7t+2)+(c9t+1)][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+(c9t+1)][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+(c9t+1)][-(c7t+2)+(c8t+1)-1]); ex[-(c7t+2)+(c9t+2)][-(c7t+2)+c8t]=ex[-(c7t+2)+(c9t+2)][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+(c9t+2)][-(c7t+2)+c8t]-hz[-(c7t+2)+(c9t+2)][-(c7t+2)+c8t-1]); ex[-(c7t+2)+(c9t+2)][-(c7t+2)+(c8t+1)]=ex[-(c7t+2)+(c9t+2)][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+(c9t+2)][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+(c9t+2)][-(c7t+2)+(c8t+1)-1]); ex[-(c7t+2)+(c9t+3)][-(c7t+2)+c8t]=ex[-(c7t+2)+(c9t+3)][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+(c9t+3)][-(c7t+2)+c8t]-hz[-(c7t+2)+(c9t+3)][-(c7t+2)+c8t-1]); ex[-(c7t+2)+(c9t+3)][-(c7t+2)+(c8t+1)]=ex[-(c7t+2)+(c9t+3)][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+(c9t+3)][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+(c9t+3)][-(c7t+2)+(c8t+1)-1]); ex[-(c7t+3)+c9t][-(c7t+3)+c8t]=ex[-(c7t+3)+c9t][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+c9t][-(c7t+3)+c8t]-hz[-(c7t+3)+c9t][-(c7t+3)+c8t-1]); 
ex[-(c7t+3)+c9t][-(c7t+3)+(c8t+1)]=ex[-(c7t+3)+c9t][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+c9t][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+c9t][-(c7t+3)+(c8t+1)-1]); ex[-(c7t+3)+(c9t+1)][-(c7t+3)+c8t]=ex[-(c7t+3)+(c9t+1)][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+(c9t+1)][-(c7t+3)+c8t]-hz[-(c7t+3)+(c9t+1)][-(c7t+3)+c8t-1]); ex[-(c7t+3)+(c9t+1)][-(c7t+3)+(c8t+1)]=ex[-(c7t+3)+(c9t+1)][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+(c9t+1)][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+(c9t+1)][-(c7t+3)+(c8t+1)-1]); ex[-(c7t+3)+(c9t+2)][-(c7t+3)+c8t]=ex[-(c7t+3)+(c9t+2)][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+(c9t+2)][-(c7t+3)+c8t]-hz[-(c7t+3)+(c9t+2)][-(c7t+3)+c8t-1]); ex[-(c7t+3)+(c9t+2)][-(c7t+3)+(c8t+1)]=ex[-(c7t+3)+(c9t+2)][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+(c9t+2)][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+(c9t+2)][-(c7t+3)+(c8t+1)-1]); ex[-(c7t+3)+(c9t+3)][-(c7t+3)+c8t]=ex[-(c7t+3)+(c9t+3)][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+(c9t+3)][-(c7t+3)+c8t]-hz[-(c7t+3)+(c9t+3)][-(c7t+3)+c8t-1]); ex[-(c7t+3)+(c9t+3)][-(c7t+3)+(c8t+1)]=ex[-(c7t+3)+(c9t+3)][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+(c9t+3)][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+(c9t+3)][-(c7t+3)+(c8t+1)-1]); ex[-(c7t+4)+c9t][-(c7t+4)+c8t]=ex[-(c7t+4)+c9t][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+c9t][-(c7t+4)+c8t]-hz[-(c7t+4)+c9t][-(c7t+4)+c8t-1]); ex[-(c7t+4)+c9t][-(c7t+4)+(c8t+1)]=ex[-(c7t+4)+c9t][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+c9t][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+c9t][-(c7t+4)+(c8t+1)-1]); ex[-(c7t+4)+(c9t+1)][-(c7t+4)+c8t]=ex[-(c7t+4)+(c9t+1)][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+(c9t+1)][-(c7t+4)+c8t]-hz[-(c7t+4)+(c9t+1)][-(c7t+4)+c8t-1]); ex[-(c7t+4)+(c9t+1)][-(c7t+4)+(c8t+1)]=ex[-(c7t+4)+(c9t+1)][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+(c9t+1)][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+(c9t+1)][-(c7t+4)+(c8t+1)-1]); ex[-(c7t+4)+(c9t+2)][-(c7t+4)+c8t]=ex[-(c7t+4)+(c9t+2)][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+(c9t+2)][-(c7t+4)+c8t]-hz[-(c7t+4)+(c9t+2)][-(c7t+4)+c8t-1]); 
ex[-(c7t+4)+(c9t+2)][-(c7t+4)+(c8t+1)]=ex[-(c7t+4)+(c9t+2)][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+(c9t+2)][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+(c9t+2)][-(c7t+4)+(c8t+1)-1]); ex[-(c7t+4)+(c9t+3)][-(c7t+4)+c8t]=ex[-(c7t+4)+(c9t+3)][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+(c9t+3)][-(c7t+4)+c8t]-hz[-(c7t+4)+(c9t+3)][-(c7t+4)+c8t-1]); ex[-(c7t+4)+(c9t+3)][-(c7t+4)+(c8t+1)]=ex[-(c7t+4)+(c9t+3)][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+(c9t+3)][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+(c9t+3)][-(c7t+4)+(c8t+1)-1]); ex[-(c7t+5)+c9t][-(c7t+5)+c8t]=ex[-(c7t+5)+c9t][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+c9t][-(c7t+5)+c8t]-hz[-(c7t+5)+c9t][-(c7t+5)+c8t-1]); ex[-(c7t+5)+c9t][-(c7t+5)+(c8t+1)]=ex[-(c7t+5)+c9t][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+c9t][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+c9t][-(c7t+5)+(c8t+1)-1]); ex[-(c7t+5)+(c9t+1)][-(c7t+5)+c8t]=ex[-(c7t+5)+(c9t+1)][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+(c9t+1)][-(c7t+5)+c8t]-hz[-(c7t+5)+(c9t+1)][-(c7t+5)+c8t-1]); ex[-(c7t+5)+(c9t+1)][-(c7t+5)+(c8t+1)]=ex[-(c7t+5)+(c9t+1)][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+(c9t+1)][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+(c9t+1)][-(c7t+5)+(c8t+1)-1]); ex[-(c7t+5)+(c9t+2)][-(c7t+5)+c8t]=ex[-(c7t+5)+(c9t+2)][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+(c9t+2)][-(c7t+5)+c8t]-hz[-(c7t+5)+(c9t+2)][-(c7t+5)+c8t-1]); ex[-(c7t+5)+(c9t+2)][-(c7t+5)+(c8t+1)]=ex[-(c7t+5)+(c9t+2)][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+(c9t+2)][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+(c9t+2)][-(c7t+5)+(c8t+1)-1]); ex[-(c7t+5)+(c9t+3)][-(c7t+5)+c8t]=ex[-(c7t+5)+(c9t+3)][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+(c9t+3)][-(c7t+5)+c8t]-hz[-(c7t+5)+(c9t+3)][-(c7t+5)+c8t-1]); ex[-(c7t+5)+(c9t+3)][-(c7t+5)+(c8t+1)]=ex[-(c7t+5)+(c9t+3)][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+(c9t+3)][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+(c9t+3)][-(c7t+5)+(c8t+1)-1]); ex[-(c7t+6)+c9t][-(c7t+6)+c8t]=ex[-(c7t+6)+c9t][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+c9t][-(c7t+6)+c8t]-hz[-(c7t+6)+c9t][-(c7t+6)+c8t-1]); 
ex[-(c7t+6)+c9t][-(c7t+6)+(c8t+1)]=ex[-(c7t+6)+c9t][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+c9t][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+c9t][-(c7t+6)+(c8t+1)-1]); ex[-(c7t+6)+(c9t+1)][-(c7t+6)+c8t]=ex[-(c7t+6)+(c9t+1)][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+(c9t+1)][-(c7t+6)+c8t]-hz[-(c7t+6)+(c9t+1)][-(c7t+6)+c8t-1]); ex[-(c7t+6)+(c9t+1)][-(c7t+6)+(c8t+1)]=ex[-(c7t+6)+(c9t+1)][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+(c9t+1)][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+(c9t+1)][-(c7t+6)+(c8t+1)-1]); ex[-(c7t+6)+(c9t+2)][-(c7t+6)+c8t]=ex[-(c7t+6)+(c9t+2)][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+(c9t+2)][-(c7t+6)+c8t]-hz[-(c7t+6)+(c9t+2)][-(c7t+6)+c8t-1]); ex[-(c7t+6)+(c9t+2)][-(c7t+6)+(c8t+1)]=ex[-(c7t+6)+(c9t+2)][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+(c9t+2)][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+(c9t+2)][-(c7t+6)+(c8t+1)-1]); ex[-(c7t+6)+(c9t+3)][-(c7t+6)+c8t]=ex[-(c7t+6)+(c9t+3)][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+(c9t+3)][-(c7t+6)+c8t]-hz[-(c7t+6)+(c9t+3)][-(c7t+6)+c8t-1]); ex[-(c7t+6)+(c9t+3)][-(c7t+6)+(c8t+1)]=ex[-(c7t+6)+(c9t+3)][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+(c9t+3)][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+(c9t+3)][-(c7t+6)+(c8t+1)-1]); ex[-(c7t+7)+c9t][-(c7t+7)+c8t]=ex[-(c7t+7)+c9t][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+c9t][-(c7t+7)+c8t]-hz[-(c7t+7)+c9t][-(c7t+7)+c8t-1]); ex[-(c7t+7)+c9t][-(c7t+7)+(c8t+1)]=ex[-(c7t+7)+c9t][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+c9t][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+c9t][-(c7t+7)+(c8t+1)-1]); ex[-(c7t+7)+(c9t+1)][-(c7t+7)+c8t]=ex[-(c7t+7)+(c9t+1)][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+(c9t+1)][-(c7t+7)+c8t]-hz[-(c7t+7)+(c9t+1)][-(c7t+7)+c8t-1]); ex[-(c7t+7)+(c9t+1)][-(c7t+7)+(c8t+1)]=ex[-(c7t+7)+(c9t+1)][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+(c9t+1)][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+(c9t+1)][-(c7t+7)+(c8t+1)-1]); ex[-(c7t+7)+(c9t+2)][-(c7t+7)+c8t]=ex[-(c7t+7)+(c9t+2)][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+(c9t+2)][-(c7t+7)+c8t]-hz[-(c7t+7)+(c9t+2)][-(c7t+7)+c8t-1]); 
ex[-(c7t+7)+(c9t+2)][-(c7t+7)+(c8t+1)]=ex[-(c7t+7)+(c9t+2)][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+(c9t+2)][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+(c9t+2)][-(c7t+7)+(c8t+1)-1]); ex[-(c7t+7)+(c9t+3)][-(c7t+7)+c8t]=ex[-(c7t+7)+(c9t+3)][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+(c9t+3)][-(c7t+7)+c8t]-hz[-(c7t+7)+(c9t+3)][-(c7t+7)+c8t-1]); ex[-(c7t+7)+(c9t+3)][-(c7t+7)+(c8t+1)]=ex[-(c7t+7)+(c9t+3)][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+(c9t+3)][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+(c9t+3)][-(c7t+7)+(c8t+1)-1]); hz[-c7t+c9t-1][-c7t+c8t-1]=hz[-c7t+c9t-1][-c7t+c8t-1]-((double)(7))/10*(ey[1+-c7t+c9t-1][-c7t+c8t-1]+ex[-c7t+c9t-1][1+-c7t+c8t-1]-ex[-c7t+c9t-1][-c7t+c8t-1]-ey[-c7t+c9t-1][-c7t+c8t-1]); hz[-c7t+c9t-1][-c7t+(c8t+1)-1]=hz[-c7t+c9t-1][-c7t+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7t+c9t-1][-c7t+(c8t+1)-1]+ex[-c7t+c9t-1][1+-c7t+(c8t+1)-1]-ex[-c7t+c9t-1][-c7t+(c8t+1)-1]-ey[-c7t+c9t-1][-c7t+(c8t+1)-1]); hz[-c7t+(c9t+1)-1][-c7t+c8t-1]=hz[-c7t+(c9t+1)-1][-c7t+c8t-1]-((double)(7))/10*(ey[1+-c7t+(c9t+1)-1][-c7t+c8t-1]+ex[-c7t+(c9t+1)-1][1+-c7t+c8t-1]-ex[-c7t+(c9t+1)-1][-c7t+c8t-1]-ey[-c7t+(c9t+1)-1][-c7t+c8t-1]); hz[-c7t+(c9t+1)-1][-c7t+(c8t+1)-1]=hz[-c7t+(c9t+1)-1][-c7t+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7t+(c9t+1)-1][-c7t+(c8t+1)-1]+ex[-c7t+(c9t+1)-1][1+-c7t+(c8t+1)-1]-ex[-c7t+(c9t+1)-1][-c7t+(c8t+1)-1]-ey[-c7t+(c9t+1)-1][-c7t+(c8t+1)-1]); hz[-c7t+(c9t+2)-1][-c7t+c8t-1]=hz[-c7t+(c9t+2)-1][-c7t+c8t-1]-((double)(7))/10*(ey[1+-c7t+(c9t+2)-1][-c7t+c8t-1]+ex[-c7t+(c9t+2)-1][1+-c7t+c8t-1]-ex[-c7t+(c9t+2)-1][-c7t+c8t-1]-ey[-c7t+(c9t+2)-1][-c7t+c8t-1]); hz[-c7t+(c9t+2)-1][-c7t+(c8t+1)-1]=hz[-c7t+(c9t+2)-1][-c7t+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7t+(c9t+2)-1][-c7t+(c8t+1)-1]+ex[-c7t+(c9t+2)-1][1+-c7t+(c8t+1)-1]-ex[-c7t+(c9t+2)-1][-c7t+(c8t+1)-1]-ey[-c7t+(c9t+2)-1][-c7t+(c8t+1)-1]); 
hz[-c7t+(c9t+3)-1][-c7t+c8t-1]=hz[-c7t+(c9t+3)-1][-c7t+c8t-1]-((double)(7))/10*(ey[1+-c7t+(c9t+3)-1][-c7t+c8t-1]+ex[-c7t+(c9t+3)-1][1+-c7t+c8t-1]-ex[-c7t+(c9t+3)-1][-c7t+c8t-1]-ey[-c7t+(c9t+3)-1][-c7t+c8t-1]); hz[-c7t+(c9t+3)-1][-c7t+(c8t+1)-1]=hz[-c7t+(c9t+3)-1][-c7t+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7t+(c9t+3)-1][-c7t+(c8t+1)-1]+ex[-c7t+(c9t+3)-1][1+-c7t+(c8t+1)-1]-ex[-c7t+(c9t+3)-1][-c7t+(c8t+1)-1]-ey[-c7t+(c9t+3)-1][-c7t+(c8t+1)-1]); hz[-(c7t+1)+c9t-1][-(c7t+1)+c8t-1]=hz[-(c7t+1)+c9t-1][-(c7t+1)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+1)+c9t-1][-(c7t+1)+c8t-1]+ex[-(c7t+1)+c9t-1][1+-(c7t+1)+c8t-1]-ex[-(c7t+1)+c9t-1][-(c7t+1)+c8t-1]-ey[-(c7t+1)+c9t-1][-(c7t+1)+c8t-1]); hz[-(c7t+1)+c9t-1][-(c7t+1)+(c8t+1)-1]=hz[-(c7t+1)+c9t-1][-(c7t+1)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+1)+c9t-1][-(c7t+1)+(c8t+1)-1]+ex[-(c7t+1)+c9t-1][1+-(c7t+1)+(c8t+1)-1]-ex[-(c7t+1)+c9t-1][-(c7t+1)+(c8t+1)-1]-ey[-(c7t+1)+c9t-1][-(c7t+1)+(c8t+1)-1]); hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8t-1]=hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8t-1]+ex[-(c7t+1)+(c9t+1)-1][1+-(c7t+1)+c8t-1]-ex[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8t-1]-ey[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8t-1]); hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+(c8t+1)-1]=hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+1)-1][-(c7t+1)+(c8t+1)-1]+ex[-(c7t+1)+(c9t+1)-1][1+-(c7t+1)+(c8t+1)-1]-ex[-(c7t+1)+(c9t+1)-1][-(c7t+1)+(c8t+1)-1]-ey[-(c7t+1)+(c9t+1)-1][-(c7t+1)+(c8t+1)-1]); hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8t-1]=hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8t-1]+ex[-(c7t+1)+(c9t+2)-1][1+-(c7t+1)+c8t-1]-ex[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8t-1]-ey[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8t-1]); 
hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+(c8t+1)-1]=hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+2)-1][-(c7t+1)+(c8t+1)-1]+ex[-(c7t+1)+(c9t+2)-1][1+-(c7t+1)+(c8t+1)-1]-ex[-(c7t+1)+(c9t+2)-1][-(c7t+1)+(c8t+1)-1]-ey[-(c7t+1)+(c9t+2)-1][-(c7t+1)+(c8t+1)-1]); hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8t-1]=hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8t-1]+ex[-(c7t+1)+(c9t+3)-1][1+-(c7t+1)+c8t-1]-ex[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8t-1]-ey[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8t-1]); hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+(c8t+1)-1]=hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+3)-1][-(c7t+1)+(c8t+1)-1]+ex[-(c7t+1)+(c9t+3)-1][1+-(c7t+1)+(c8t+1)-1]-ex[-(c7t+1)+(c9t+3)-1][-(c7t+1)+(c8t+1)-1]-ey[-(c7t+1)+(c9t+3)-1][-(c7t+1)+(c8t+1)-1]); hz[-(c7t+2)+c9t-1][-(c7t+2)+c8t-1]=hz[-(c7t+2)+c9t-1][-(c7t+2)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+2)+c9t-1][-(c7t+2)+c8t-1]+ex[-(c7t+2)+c9t-1][1+-(c7t+2)+c8t-1]-ex[-(c7t+2)+c9t-1][-(c7t+2)+c8t-1]-ey[-(c7t+2)+c9t-1][-(c7t+2)+c8t-1]); hz[-(c7t+2)+c9t-1][-(c7t+2)+(c8t+1)-1]=hz[-(c7t+2)+c9t-1][-(c7t+2)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+2)+c9t-1][-(c7t+2)+(c8t+1)-1]+ex[-(c7t+2)+c9t-1][1+-(c7t+2)+(c8t+1)-1]-ex[-(c7t+2)+c9t-1][-(c7t+2)+(c8t+1)-1]-ey[-(c7t+2)+c9t-1][-(c7t+2)+(c8t+1)-1]); hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8t-1]=hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8t-1]+ex[-(c7t+2)+(c9t+1)-1][1+-(c7t+2)+c8t-1]-ex[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8t-1]-ey[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8t-1]); hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+(c8t+1)-1]=hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+1)-1][-(c7t+2)+(c8t+1)-1]+ex[-(c7t+2)+(c9t+1)-1][1+-(c7t+2)+(c8t+1)-1]-ex[-(c7t+2)+(c9t+1)-1][-(c7t+2)+(c8t+1)-1]-ey[-(c7t+2)+(c9t+1)-1][-(c7t+2)+(c8t+1)-1]); 
hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8t-1]=hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8t-1]+ex[-(c7t+2)+(c9t+2)-1][1+-(c7t+2)+c8t-1]-ex[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8t-1]-ey[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8t-1]); hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+(c8t+1)-1]=hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+2)-1][-(c7t+2)+(c8t+1)-1]+ex[-(c7t+2)+(c9t+2)-1][1+-(c7t+2)+(c8t+1)-1]-ex[-(c7t+2)+(c9t+2)-1][-(c7t+2)+(c8t+1)-1]-ey[-(c7t+2)+(c9t+2)-1][-(c7t+2)+(c8t+1)-1]); hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8t-1]=hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8t-1]+ex[-(c7t+2)+(c9t+3)-1][1+-(c7t+2)+c8t-1]-ex[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8t-1]-ey[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8t-1]); hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+(c8t+1)-1]=hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+3)-1][-(c7t+2)+(c8t+1)-1]+ex[-(c7t+2)+(c9t+3)-1][1+-(c7t+2)+(c8t+1)-1]-ex[-(c7t+2)+(c9t+3)-1][-(c7t+2)+(c8t+1)-1]-ey[-(c7t+2)+(c9t+3)-1][-(c7t+2)+(c8t+1)-1]); hz[-(c7t+3)+c9t-1][-(c7t+3)+c8t-1]=hz[-(c7t+3)+c9t-1][-(c7t+3)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+3)+c9t-1][-(c7t+3)+c8t-1]+ex[-(c7t+3)+c9t-1][1+-(c7t+3)+c8t-1]-ex[-(c7t+3)+c9t-1][-(c7t+3)+c8t-1]-ey[-(c7t+3)+c9t-1][-(c7t+3)+c8t-1]); hz[-(c7t+3)+c9t-1][-(c7t+3)+(c8t+1)-1]=hz[-(c7t+3)+c9t-1][-(c7t+3)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+3)+c9t-1][-(c7t+3)+(c8t+1)-1]+ex[-(c7t+3)+c9t-1][1+-(c7t+3)+(c8t+1)-1]-ex[-(c7t+3)+c9t-1][-(c7t+3)+(c8t+1)-1]-ey[-(c7t+3)+c9t-1][-(c7t+3)+(c8t+1)-1]); hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8t-1]=hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8t-1]+ex[-(c7t+3)+(c9t+1)-1][1+-(c7t+3)+c8t-1]-ex[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8t-1]-ey[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8t-1]); 
hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+(c8t+1)-1]=hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+1)-1][-(c7t+3)+(c8t+1)-1]+ex[-(c7t+3)+(c9t+1)-1][1+-(c7t+3)+(c8t+1)-1]-ex[-(c7t+3)+(c9t+1)-1][-(c7t+3)+(c8t+1)-1]-ey[-(c7t+3)+(c9t+1)-1][-(c7t+3)+(c8t+1)-1]); hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8t-1]=hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8t-1]+ex[-(c7t+3)+(c9t+2)-1][1+-(c7t+3)+c8t-1]-ex[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8t-1]-ey[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8t-1]); hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+(c8t+1)-1]=hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+2)-1][-(c7t+3)+(c8t+1)-1]+ex[-(c7t+3)+(c9t+2)-1][1+-(c7t+3)+(c8t+1)-1]-ex[-(c7t+3)+(c9t+2)-1][-(c7t+3)+(c8t+1)-1]-ey[-(c7t+3)+(c9t+2)-1][-(c7t+3)+(c8t+1)-1]); hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8t-1]=hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8t-1]+ex[-(c7t+3)+(c9t+3)-1][1+-(c7t+3)+c8t-1]-ex[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8t-1]-ey[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8t-1]); hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+(c8t+1)-1]=hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+3)-1][-(c7t+3)+(c8t+1)-1]+ex[-(c7t+3)+(c9t+3)-1][1+-(c7t+3)+(c8t+1)-1]-ex[-(c7t+3)+(c9t+3)-1][-(c7t+3)+(c8t+1)-1]-ey[-(c7t+3)+(c9t+3)-1][-(c7t+3)+(c8t+1)-1]); hz[-(c7t+4)+c9t-1][-(c7t+4)+c8t-1]=hz[-(c7t+4)+c9t-1][-(c7t+4)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+4)+c9t-1][-(c7t+4)+c8t-1]+ex[-(c7t+4)+c9t-1][1+-(c7t+4)+c8t-1]-ex[-(c7t+4)+c9t-1][-(c7t+4)+c8t-1]-ey[-(c7t+4)+c9t-1][-(c7t+4)+c8t-1]); hz[-(c7t+4)+c9t-1][-(c7t+4)+(c8t+1)-1]=hz[-(c7t+4)+c9t-1][-(c7t+4)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+4)+c9t-1][-(c7t+4)+(c8t+1)-1]+ex[-(c7t+4)+c9t-1][1+-(c7t+4)+(c8t+1)-1]-ex[-(c7t+4)+c9t-1][-(c7t+4)+(c8t+1)-1]-ey[-(c7t+4)+c9t-1][-(c7t+4)+(c8t+1)-1]); 
hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8t-1]=hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8t-1]+ex[-(c7t+4)+(c9t+1)-1][1+-(c7t+4)+c8t-1]-ex[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8t-1]-ey[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8t-1]); hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+(c8t+1)-1]=hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+1)-1][-(c7t+4)+(c8t+1)-1]+ex[-(c7t+4)+(c9t+1)-1][1+-(c7t+4)+(c8t+1)-1]-ex[-(c7t+4)+(c9t+1)-1][-(c7t+4)+(c8t+1)-1]-ey[-(c7t+4)+(c9t+1)-1][-(c7t+4)+(c8t+1)-1]); hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8t-1]=hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8t-1]+ex[-(c7t+4)+(c9t+2)-1][1+-(c7t+4)+c8t-1]-ex[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8t-1]-ey[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8t-1]); hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+(c8t+1)-1]=hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+2)-1][-(c7t+4)+(c8t+1)-1]+ex[-(c7t+4)+(c9t+2)-1][1+-(c7t+4)+(c8t+1)-1]-ex[-(c7t+4)+(c9t+2)-1][-(c7t+4)+(c8t+1)-1]-ey[-(c7t+4)+(c9t+2)-1][-(c7t+4)+(c8t+1)-1]); hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8t-1]=hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8t-1]+ex[-(c7t+4)+(c9t+3)-1][1+-(c7t+4)+c8t-1]-ex[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8t-1]-ey[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8t-1]); hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+(c8t+1)-1]=hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+3)-1][-(c7t+4)+(c8t+1)-1]+ex[-(c7t+4)+(c9t+3)-1][1+-(c7t+4)+(c8t+1)-1]-ex[-(c7t+4)+(c9t+3)-1][-(c7t+4)+(c8t+1)-1]-ey[-(c7t+4)+(c9t+3)-1][-(c7t+4)+(c8t+1)-1]); hz[-(c7t+5)+c9t-1][-(c7t+5)+c8t-1]=hz[-(c7t+5)+c9t-1][-(c7t+5)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+5)+c9t-1][-(c7t+5)+c8t-1]+ex[-(c7t+5)+c9t-1][1+-(c7t+5)+c8t-1]-ex[-(c7t+5)+c9t-1][-(c7t+5)+c8t-1]-ey[-(c7t+5)+c9t-1][-(c7t+5)+c8t-1]); 
hz[-(c7t+5)+c9t-1][-(c7t+5)+(c8t+1)-1]=hz[-(c7t+5)+c9t-1][-(c7t+5)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+5)+c9t-1][-(c7t+5)+(c8t+1)-1]+ex[-(c7t+5)+c9t-1][1+-(c7t+5)+(c8t+1)-1]-ex[-(c7t+5)+c9t-1][-(c7t+5)+(c8t+1)-1]-ey[-(c7t+5)+c9t-1][-(c7t+5)+(c8t+1)-1]); hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8t-1]=hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8t-1]+ex[-(c7t+5)+(c9t+1)-1][1+-(c7t+5)+c8t-1]-ex[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8t-1]-ey[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8t-1]); hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+(c8t+1)-1]=hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+1)-1][-(c7t+5)+(c8t+1)-1]+ex[-(c7t+5)+(c9t+1)-1][1+-(c7t+5)+(c8t+1)-1]-ex[-(c7t+5)+(c9t+1)-1][-(c7t+5)+(c8t+1)-1]-ey[-(c7t+5)+(c9t+1)-1][-(c7t+5)+(c8t+1)-1]); hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8t-1]=hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8t-1]+ex[-(c7t+5)+(c9t+2)-1][1+-(c7t+5)+c8t-1]-ex[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8t-1]-ey[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8t-1]); hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+(c8t+1)-1]=hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+2)-1][-(c7t+5)+(c8t+1)-1]+ex[-(c7t+5)+(c9t+2)-1][1+-(c7t+5)+(c8t+1)-1]-ex[-(c7t+5)+(c9t+2)-1][-(c7t+5)+(c8t+1)-1]-ey[-(c7t+5)+(c9t+2)-1][-(c7t+5)+(c8t+1)-1]); hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8t-1]=hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8t-1]+ex[-(c7t+5)+(c9t+3)-1][1+-(c7t+5)+c8t-1]-ex[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8t-1]-ey[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8t-1]); hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+(c8t+1)-1]=hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+3)-1][-(c7t+5)+(c8t+1)-1]+ex[-(c7t+5)+(c9t+3)-1][1+-(c7t+5)+(c8t+1)-1]-ex[-(c7t+5)+(c9t+3)-1][-(c7t+5)+(c8t+1)-1]-ey[-(c7t+5)+(c9t+3)-1][-(c7t+5)+(c8t+1)-1]); 
hz[-(c7t+6)+c9t-1][-(c7t+6)+c8t-1]=hz[-(c7t+6)+c9t-1][-(c7t+6)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+6)+c9t-1][-(c7t+6)+c8t-1]+ex[-(c7t+6)+c9t-1][1+-(c7t+6)+c8t-1]-ex[-(c7t+6)+c9t-1][-(c7t+6)+c8t-1]-ey[-(c7t+6)+c9t-1][-(c7t+6)+c8t-1]); hz[-(c7t+6)+c9t-1][-(c7t+6)+(c8t+1)-1]=hz[-(c7t+6)+c9t-1][-(c7t+6)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+6)+c9t-1][-(c7t+6)+(c8t+1)-1]+ex[-(c7t+6)+c9t-1][1+-(c7t+6)+(c8t+1)-1]-ex[-(c7t+6)+c9t-1][-(c7t+6)+(c8t+1)-1]-ey[-(c7t+6)+c9t-1][-(c7t+6)+(c8t+1)-1]); hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8t-1]=hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8t-1]+ex[-(c7t+6)+(c9t+1)-1][1+-(c7t+6)+c8t-1]-ex[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8t-1]-ey[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8t-1]); hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+(c8t+1)-1]=hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+1)-1][-(c7t+6)+(c8t+1)-1]+ex[-(c7t+6)+(c9t+1)-1][1+-(c7t+6)+(c8t+1)-1]-ex[-(c7t+6)+(c9t+1)-1][-(c7t+6)+(c8t+1)-1]-ey[-(c7t+6)+(c9t+1)-1][-(c7t+6)+(c8t+1)-1]); hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8t-1]=hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8t-1]+ex[-(c7t+6)+(c9t+2)-1][1+-(c7t+6)+c8t-1]-ex[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8t-1]-ey[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8t-1]); hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+(c8t+1)-1]=hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+2)-1][-(c7t+6)+(c8t+1)-1]+ex[-(c7t+6)+(c9t+2)-1][1+-(c7t+6)+(c8t+1)-1]-ex[-(c7t+6)+(c9t+2)-1][-(c7t+6)+(c8t+1)-1]-ey[-(c7t+6)+(c9t+2)-1][-(c7t+6)+(c8t+1)-1]); hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8t-1]=hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8t-1]+ex[-(c7t+6)+(c9t+3)-1][1+-(c7t+6)+c8t-1]-ex[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8t-1]-ey[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8t-1]); 
hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+(c8t+1)-1]=hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+3)-1][-(c7t+6)+(c8t+1)-1]+ex[-(c7t+6)+(c9t+3)-1][1+-(c7t+6)+(c8t+1)-1]-ex[-(c7t+6)+(c9t+3)-1][-(c7t+6)+(c8t+1)-1]-ey[-(c7t+6)+(c9t+3)-1][-(c7t+6)+(c8t+1)-1]); hz[-(c7t+7)+c9t-1][-(c7t+7)+c8t-1]=hz[-(c7t+7)+c9t-1][-(c7t+7)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+7)+c9t-1][-(c7t+7)+c8t-1]+ex[-(c7t+7)+c9t-1][1+-(c7t+7)+c8t-1]-ex[-(c7t+7)+c9t-1][-(c7t+7)+c8t-1]-ey[-(c7t+7)+c9t-1][-(c7t+7)+c8t-1]); hz[-(c7t+7)+c9t-1][-(c7t+7)+(c8t+1)-1]=hz[-(c7t+7)+c9t-1][-(c7t+7)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+7)+c9t-1][-(c7t+7)+(c8t+1)-1]+ex[-(c7t+7)+c9t-1][1+-(c7t+7)+(c8t+1)-1]-ex[-(c7t+7)+c9t-1][-(c7t+7)+(c8t+1)-1]-ey[-(c7t+7)+c9t-1][-(c7t+7)+(c8t+1)-1]); hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8t-1]=hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8t-1]+ex[-(c7t+7)+(c9t+1)-1][1+-(c7t+7)+c8t-1]-ex[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8t-1]-ey[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8t-1]); hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+(c8t+1)-1]=hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+1)-1][-(c7t+7)+(c8t+1)-1]+ex[-(c7t+7)+(c9t+1)-1][1+-(c7t+7)+(c8t+1)-1]-ex[-(c7t+7)+(c9t+1)-1][-(c7t+7)+(c8t+1)-1]-ey[-(c7t+7)+(c9t+1)-1][-(c7t+7)+(c8t+1)-1]); hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8t-1]=hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8t-1]+ex[-(c7t+7)+(c9t+2)-1][1+-(c7t+7)+c8t-1]-ex[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8t-1]-ey[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8t-1]); hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+(c8t+1)-1]=hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+2)-1][-(c7t+7)+(c8t+1)-1]+ex[-(c7t+7)+(c9t+2)-1][1+-(c7t+7)+(c8t+1)-1]-ex[-(c7t+7)+(c9t+2)-1][-(c7t+7)+(c8t+1)-1]-ey[-(c7t+7)+(c9t+2)-1][-(c7t+7)+(c8t+1)-1]); 
hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8t-1]=hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8t-1]+ex[-(c7t+7)+(c9t+3)-1][1+-(c7t+7)+c8t-1]-ex[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8t-1]-ey[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8t-1]); hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+(c8t+1)-1]=hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+3)-1][-(c7t+7)+(c8t+1)-1]+ex[-(c7t+7)+(c9t+3)-1][1+-(c7t+7)+(c8t+1)-1]-ex[-(c7t+7)+(c9t+3)-1][-(c7t+7)+(c8t+1)-1]-ey[-(c7t+7)+(c9t+3)-1][-(c7t+7)+(c8t+1)-1]); } register int cbv_3; cbv_3=32*c2+31; #pragma ivdep #pragma vector always for (c8=c8t; c8<=cbv_3; c8=c8+1) { ey[-c7t+c9t][-c7t+c8]=ey[-c7t+c9t][-c7t+c8]-((double)(1))/2*(hz[-c7t+c9t][-c7t+c8]-hz[-c7t+c9t-1][-c7t+c8]); ey[-c7t+(c9t+1)][-c7t+c8]=ey[-c7t+(c9t+1)][-c7t+c8]-((double)(1))/2*(hz[-c7t+(c9t+1)][-c7t+c8]-hz[-c7t+(c9t+1)-1][-c7t+c8]); ey[-c7t+(c9t+2)][-c7t+c8]=ey[-c7t+(c9t+2)][-c7t+c8]-((double)(1))/2*(hz[-c7t+(c9t+2)][-c7t+c8]-hz[-c7t+(c9t+2)-1][-c7t+c8]); ey[-c7t+(c9t+3)][-c7t+c8]=ey[-c7t+(c9t+3)][-c7t+c8]-((double)(1))/2*(hz[-c7t+(c9t+3)][-c7t+c8]-hz[-c7t+(c9t+3)-1][-c7t+c8]); ey[-(c7t+1)+c9t][-(c7t+1)+c8]=ey[-(c7t+1)+c9t][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+c9t][-(c7t+1)+c8]-hz[-(c7t+1)+c9t-1][-(c7t+1)+c8]); ey[-(c7t+1)+(c9t+1)][-(c7t+1)+c8]=ey[-(c7t+1)+(c9t+1)][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+(c9t+1)][-(c7t+1)+c8]-hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8]); ey[-(c7t+1)+(c9t+2)][-(c7t+1)+c8]=ey[-(c7t+1)+(c9t+2)][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+(c9t+2)][-(c7t+1)+c8]-hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8]); ey[-(c7t+1)+(c9t+3)][-(c7t+1)+c8]=ey[-(c7t+1)+(c9t+3)][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+(c9t+3)][-(c7t+1)+c8]-hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8]); ey[-(c7t+2)+c9t][-(c7t+2)+c8]=ey[-(c7t+2)+c9t][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+c9t][-(c7t+2)+c8]-hz[-(c7t+2)+c9t-1][-(c7t+2)+c8]); 
ey[-(c7t+2)+(c9t+1)][-(c7t+2)+c8]=ey[-(c7t+2)+(c9t+1)][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+(c9t+1)][-(c7t+2)+c8]-hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8]); ey[-(c7t+2)+(c9t+2)][-(c7t+2)+c8]=ey[-(c7t+2)+(c9t+2)][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+(c9t+2)][-(c7t+2)+c8]-hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8]); ey[-(c7t+2)+(c9t+3)][-(c7t+2)+c8]=ey[-(c7t+2)+(c9t+3)][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+(c9t+3)][-(c7t+2)+c8]-hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8]); ey[-(c7t+3)+c9t][-(c7t+3)+c8]=ey[-(c7t+3)+c9t][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+c9t][-(c7t+3)+c8]-hz[-(c7t+3)+c9t-1][-(c7t+3)+c8]); ey[-(c7t+3)+(c9t+1)][-(c7t+3)+c8]=ey[-(c7t+3)+(c9t+1)][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+(c9t+1)][-(c7t+3)+c8]-hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8]); ey[-(c7t+3)+(c9t+2)][-(c7t+3)+c8]=ey[-(c7t+3)+(c9t+2)][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+(c9t+2)][-(c7t+3)+c8]-hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8]); ey[-(c7t+3)+(c9t+3)][-(c7t+3)+c8]=ey[-(c7t+3)+(c9t+3)][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+(c9t+3)][-(c7t+3)+c8]-hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8]); ey[-(c7t+4)+c9t][-(c7t+4)+c8]=ey[-(c7t+4)+c9t][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+c9t][-(c7t+4)+c8]-hz[-(c7t+4)+c9t-1][-(c7t+4)+c8]); ey[-(c7t+4)+(c9t+1)][-(c7t+4)+c8]=ey[-(c7t+4)+(c9t+1)][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+(c9t+1)][-(c7t+4)+c8]-hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8]); ey[-(c7t+4)+(c9t+2)][-(c7t+4)+c8]=ey[-(c7t+4)+(c9t+2)][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+(c9t+2)][-(c7t+4)+c8]-hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8]); ey[-(c7t+4)+(c9t+3)][-(c7t+4)+c8]=ey[-(c7t+4)+(c9t+3)][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+(c9t+3)][-(c7t+4)+c8]-hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8]); ey[-(c7t+5)+c9t][-(c7t+5)+c8]=ey[-(c7t+5)+c9t][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+c9t][-(c7t+5)+c8]-hz[-(c7t+5)+c9t-1][-(c7t+5)+c8]); ey[-(c7t+5)+(c9t+1)][-(c7t+5)+c8]=ey[-(c7t+5)+(c9t+1)][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+(c9t+1)][-(c7t+5)+c8]-hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8]); 
ey[-(c7t+5)+(c9t+2)][-(c7t+5)+c8]=ey[-(c7t+5)+(c9t+2)][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+(c9t+2)][-(c7t+5)+c8]-hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8]); ey[-(c7t+5)+(c9t+3)][-(c7t+5)+c8]=ey[-(c7t+5)+(c9t+3)][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+(c9t+3)][-(c7t+5)+c8]-hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8]); ey[-(c7t+6)+c9t][-(c7t+6)+c8]=ey[-(c7t+6)+c9t][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+c9t][-(c7t+6)+c8]-hz[-(c7t+6)+c9t-1][-(c7t+6)+c8]); ey[-(c7t+6)+(c9t+1)][-(c7t+6)+c8]=ey[-(c7t+6)+(c9t+1)][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+(c9t+1)][-(c7t+6)+c8]-hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8]); ey[-(c7t+6)+(c9t+2)][-(c7t+6)+c8]=ey[-(c7t+6)+(c9t+2)][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+(c9t+2)][-(c7t+6)+c8]-hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8]); ey[-(c7t+6)+(c9t+3)][-(c7t+6)+c8]=ey[-(c7t+6)+(c9t+3)][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+(c9t+3)][-(c7t+6)+c8]-hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8]); ey[-(c7t+7)+c9t][-(c7t+7)+c8]=ey[-(c7t+7)+c9t][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+c9t][-(c7t+7)+c8]-hz[-(c7t+7)+c9t-1][-(c7t+7)+c8]); ey[-(c7t+7)+(c9t+1)][-(c7t+7)+c8]=ey[-(c7t+7)+(c9t+1)][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+(c9t+1)][-(c7t+7)+c8]-hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8]); ey[-(c7t+7)+(c9t+2)][-(c7t+7)+c8]=ey[-(c7t+7)+(c9t+2)][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+(c9t+2)][-(c7t+7)+c8]-hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8]); ey[-(c7t+7)+(c9t+3)][-(c7t+7)+c8]=ey[-(c7t+7)+(c9t+3)][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+(c9t+3)][-(c7t+7)+c8]-hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8]); ex[-c7t+c9t][-c7t+c8]=ex[-c7t+c9t][-c7t+c8]-((double)(1))/2*(hz[-c7t+c9t][-c7t+c8]-hz[-c7t+c9t][-c7t+c8-1]); ex[-c7t+(c9t+1)][-c7t+c8]=ex[-c7t+(c9t+1)][-c7t+c8]-((double)(1))/2*(hz[-c7t+(c9t+1)][-c7t+c8]-hz[-c7t+(c9t+1)][-c7t+c8-1]); ex[-c7t+(c9t+2)][-c7t+c8]=ex[-c7t+(c9t+2)][-c7t+c8]-((double)(1))/2*(hz[-c7t+(c9t+2)][-c7t+c8]-hz[-c7t+(c9t+2)][-c7t+c8-1]); 
ex[-c7t+(c9t+3)][-c7t+c8]=ex[-c7t+(c9t+3)][-c7t+c8]-((double)(1))/2*(hz[-c7t+(c9t+3)][-c7t+c8]-hz[-c7t+(c9t+3)][-c7t+c8-1]); ex[-(c7t+1)+c9t][-(c7t+1)+c8]=ex[-(c7t+1)+c9t][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+c9t][-(c7t+1)+c8]-hz[-(c7t+1)+c9t][-(c7t+1)+c8-1]); ex[-(c7t+1)+(c9t+1)][-(c7t+1)+c8]=ex[-(c7t+1)+(c9t+1)][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+(c9t+1)][-(c7t+1)+c8]-hz[-(c7t+1)+(c9t+1)][-(c7t+1)+c8-1]); ex[-(c7t+1)+(c9t+2)][-(c7t+1)+c8]=ex[-(c7t+1)+(c9t+2)][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+(c9t+2)][-(c7t+1)+c8]-hz[-(c7t+1)+(c9t+2)][-(c7t+1)+c8-1]); ex[-(c7t+1)+(c9t+3)][-(c7t+1)+c8]=ex[-(c7t+1)+(c9t+3)][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+(c9t+3)][-(c7t+1)+c8]-hz[-(c7t+1)+(c9t+3)][-(c7t+1)+c8-1]); ex[-(c7t+2)+c9t][-(c7t+2)+c8]=ex[-(c7t+2)+c9t][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+c9t][-(c7t+2)+c8]-hz[-(c7t+2)+c9t][-(c7t+2)+c8-1]); ex[-(c7t+2)+(c9t+1)][-(c7t+2)+c8]=ex[-(c7t+2)+(c9t+1)][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+(c9t+1)][-(c7t+2)+c8]-hz[-(c7t+2)+(c9t+1)][-(c7t+2)+c8-1]); ex[-(c7t+2)+(c9t+2)][-(c7t+2)+c8]=ex[-(c7t+2)+(c9t+2)][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+(c9t+2)][-(c7t+2)+c8]-hz[-(c7t+2)+(c9t+2)][-(c7t+2)+c8-1]); ex[-(c7t+2)+(c9t+3)][-(c7t+2)+c8]=ex[-(c7t+2)+(c9t+3)][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+(c9t+3)][-(c7t+2)+c8]-hz[-(c7t+2)+(c9t+3)][-(c7t+2)+c8-1]); ex[-(c7t+3)+c9t][-(c7t+3)+c8]=ex[-(c7t+3)+c9t][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+c9t][-(c7t+3)+c8]-hz[-(c7t+3)+c9t][-(c7t+3)+c8-1]); ex[-(c7t+3)+(c9t+1)][-(c7t+3)+c8]=ex[-(c7t+3)+(c9t+1)][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+(c9t+1)][-(c7t+3)+c8]-hz[-(c7t+3)+(c9t+1)][-(c7t+3)+c8-1]); ex[-(c7t+3)+(c9t+2)][-(c7t+3)+c8]=ex[-(c7t+3)+(c9t+2)][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+(c9t+2)][-(c7t+3)+c8]-hz[-(c7t+3)+(c9t+2)][-(c7t+3)+c8-1]); ex[-(c7t+3)+(c9t+3)][-(c7t+3)+c8]=ex[-(c7t+3)+(c9t+3)][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+(c9t+3)][-(c7t+3)+c8]-hz[-(c7t+3)+(c9t+3)][-(c7t+3)+c8-1]); 
ex[-(c7t+4)+c9t][-(c7t+4)+c8]=ex[-(c7t+4)+c9t][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+c9t][-(c7t+4)+c8]-hz[-(c7t+4)+c9t][-(c7t+4)+c8-1]); ex[-(c7t+4)+(c9t+1)][-(c7t+4)+c8]=ex[-(c7t+4)+(c9t+1)][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+(c9t+1)][-(c7t+4)+c8]-hz[-(c7t+4)+(c9t+1)][-(c7t+4)+c8-1]); ex[-(c7t+4)+(c9t+2)][-(c7t+4)+c8]=ex[-(c7t+4)+(c9t+2)][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+(c9t+2)][-(c7t+4)+c8]-hz[-(c7t+4)+(c9t+2)][-(c7t+4)+c8-1]); ex[-(c7t+4)+(c9t+3)][-(c7t+4)+c8]=ex[-(c7t+4)+(c9t+3)][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+(c9t+3)][-(c7t+4)+c8]-hz[-(c7t+4)+(c9t+3)][-(c7t+4)+c8-1]); ex[-(c7t+5)+c9t][-(c7t+5)+c8]=ex[-(c7t+5)+c9t][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+c9t][-(c7t+5)+c8]-hz[-(c7t+5)+c9t][-(c7t+5)+c8-1]); ex[-(c7t+5)+(c9t+1)][-(c7t+5)+c8]=ex[-(c7t+5)+(c9t+1)][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+(c9t+1)][-(c7t+5)+c8]-hz[-(c7t+5)+(c9t+1)][-(c7t+5)+c8-1]); ex[-(c7t+5)+(c9t+2)][-(c7t+5)+c8]=ex[-(c7t+5)+(c9t+2)][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+(c9t+2)][-(c7t+5)+c8]-hz[-(c7t+5)+(c9t+2)][-(c7t+5)+c8-1]); ex[-(c7t+5)+(c9t+3)][-(c7t+5)+c8]=ex[-(c7t+5)+(c9t+3)][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+(c9t+3)][-(c7t+5)+c8]-hz[-(c7t+5)+(c9t+3)][-(c7t+5)+c8-1]); ex[-(c7t+6)+c9t][-(c7t+6)+c8]=ex[-(c7t+6)+c9t][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+c9t][-(c7t+6)+c8]-hz[-(c7t+6)+c9t][-(c7t+6)+c8-1]); ex[-(c7t+6)+(c9t+1)][-(c7t+6)+c8]=ex[-(c7t+6)+(c9t+1)][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+(c9t+1)][-(c7t+6)+c8]-hz[-(c7t+6)+(c9t+1)][-(c7t+6)+c8-1]); ex[-(c7t+6)+(c9t+2)][-(c7t+6)+c8]=ex[-(c7t+6)+(c9t+2)][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+(c9t+2)][-(c7t+6)+c8]-hz[-(c7t+6)+(c9t+2)][-(c7t+6)+c8-1]); ex[-(c7t+6)+(c9t+3)][-(c7t+6)+c8]=ex[-(c7t+6)+(c9t+3)][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+(c9t+3)][-(c7t+6)+c8]-hz[-(c7t+6)+(c9t+3)][-(c7t+6)+c8-1]); ex[-(c7t+7)+c9t][-(c7t+7)+c8]=ex[-(c7t+7)+c9t][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+c9t][-(c7t+7)+c8]-hz[-(c7t+7)+c9t][-(c7t+7)+c8-1]); 
ex[-(c7t+7)+(c9t+1)][-(c7t+7)+c8]=ex[-(c7t+7)+(c9t+1)][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+(c9t+1)][-(c7t+7)+c8]-hz[-(c7t+7)+(c9t+1)][-(c7t+7)+c8-1]); ex[-(c7t+7)+(c9t+2)][-(c7t+7)+c8]=ex[-(c7t+7)+(c9t+2)][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+(c9t+2)][-(c7t+7)+c8]-hz[-(c7t+7)+(c9t+2)][-(c7t+7)+c8-1]); ex[-(c7t+7)+(c9t+3)][-(c7t+7)+c8]=ex[-(c7t+7)+(c9t+3)][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+(c9t+3)][-(c7t+7)+c8]-hz[-(c7t+7)+(c9t+3)][-(c7t+7)+c8-1]); hz[-c7t+c9t-1][-c7t+c8-1]=hz[-c7t+c9t-1][-c7t+c8-1]-((double)(7))/10*(ey[1+-c7t+c9t-1][-c7t+c8-1]+ex[-c7t+c9t-1][1+-c7t+c8-1]-ex[-c7t+c9t-1][-c7t+c8-1]-ey[-c7t+c9t-1][-c7t+c8-1]); hz[-c7t+(c9t+1)-1][-c7t+c8-1]=hz[-c7t+(c9t+1)-1][-c7t+c8-1]-((double)(7))/10*(ey[1+-c7t+(c9t+1)-1][-c7t+c8-1]+ex[-c7t+(c9t+1)-1][1+-c7t+c8-1]-ex[-c7t+(c9t+1)-1][-c7t+c8-1]-ey[-c7t+(c9t+1)-1][-c7t+c8-1]); hz[-c7t+(c9t+2)-1][-c7t+c8-1]=hz[-c7t+(c9t+2)-1][-c7t+c8-1]-((double)(7))/10*(ey[1+-c7t+(c9t+2)-1][-c7t+c8-1]+ex[-c7t+(c9t+2)-1][1+-c7t+c8-1]-ex[-c7t+(c9t+2)-1][-c7t+c8-1]-ey[-c7t+(c9t+2)-1][-c7t+c8-1]); hz[-c7t+(c9t+3)-1][-c7t+c8-1]=hz[-c7t+(c9t+3)-1][-c7t+c8-1]-((double)(7))/10*(ey[1+-c7t+(c9t+3)-1][-c7t+c8-1]+ex[-c7t+(c9t+3)-1][1+-c7t+c8-1]-ex[-c7t+(c9t+3)-1][-c7t+c8-1]-ey[-c7t+(c9t+3)-1][-c7t+c8-1]); hz[-(c7t+1)+c9t-1][-(c7t+1)+c8-1]=hz[-(c7t+1)+c9t-1][-(c7t+1)+c8-1]-((double)(7))/10*(ey[1+-(c7t+1)+c9t-1][-(c7t+1)+c8-1]+ex[-(c7t+1)+c9t-1][1+-(c7t+1)+c8-1]-ex[-(c7t+1)+c9t-1][-(c7t+1)+c8-1]-ey[-(c7t+1)+c9t-1][-(c7t+1)+c8-1]); hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8-1]=hz[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8-1]+ex[-(c7t+1)+(c9t+1)-1][1+-(c7t+1)+c8-1]-ex[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8-1]-ey[-(c7t+1)+(c9t+1)-1][-(c7t+1)+c8-1]); hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8-1]=hz[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8-1]+ex[-(c7t+1)+(c9t+2)-1][1+-(c7t+1)+c8-1]-ex[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8-1]-ey[-(c7t+1)+(c9t+2)-1][-(c7t+1)+c8-1]); 
hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8-1]=hz[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8-1]-((double)(7))/10*(ey[1+-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8-1]+ex[-(c7t+1)+(c9t+3)-1][1+-(c7t+1)+c8-1]-ex[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8-1]-ey[-(c7t+1)+(c9t+3)-1][-(c7t+1)+c8-1]); hz[-(c7t+2)+c9t-1][-(c7t+2)+c8-1]=hz[-(c7t+2)+c9t-1][-(c7t+2)+c8-1]-((double)(7))/10*(ey[1+-(c7t+2)+c9t-1][-(c7t+2)+c8-1]+ex[-(c7t+2)+c9t-1][1+-(c7t+2)+c8-1]-ex[-(c7t+2)+c9t-1][-(c7t+2)+c8-1]-ey[-(c7t+2)+c9t-1][-(c7t+2)+c8-1]); hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8-1]=hz[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8-1]+ex[-(c7t+2)+(c9t+1)-1][1+-(c7t+2)+c8-1]-ex[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8-1]-ey[-(c7t+2)+(c9t+1)-1][-(c7t+2)+c8-1]); hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8-1]=hz[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8-1]+ex[-(c7t+2)+(c9t+2)-1][1+-(c7t+2)+c8-1]-ex[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8-1]-ey[-(c7t+2)+(c9t+2)-1][-(c7t+2)+c8-1]); hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8-1]=hz[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8-1]-((double)(7))/10*(ey[1+-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8-1]+ex[-(c7t+2)+(c9t+3)-1][1+-(c7t+2)+c8-1]-ex[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8-1]-ey[-(c7t+2)+(c9t+3)-1][-(c7t+2)+c8-1]); hz[-(c7t+3)+c9t-1][-(c7t+3)+c8-1]=hz[-(c7t+3)+c9t-1][-(c7t+3)+c8-1]-((double)(7))/10*(ey[1+-(c7t+3)+c9t-1][-(c7t+3)+c8-1]+ex[-(c7t+3)+c9t-1][1+-(c7t+3)+c8-1]-ex[-(c7t+3)+c9t-1][-(c7t+3)+c8-1]-ey[-(c7t+3)+c9t-1][-(c7t+3)+c8-1]); hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8-1]=hz[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8-1]+ex[-(c7t+3)+(c9t+1)-1][1+-(c7t+3)+c8-1]-ex[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8-1]-ey[-(c7t+3)+(c9t+1)-1][-(c7t+3)+c8-1]); hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8-1]=hz[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8-1]+ex[-(c7t+3)+(c9t+2)-1][1+-(c7t+3)+c8-1]-ex[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8-1]-ey[-(c7t+3)+(c9t+2)-1][-(c7t+3)+c8-1]); 
hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8-1]=hz[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8-1]-((double)(7))/10*(ey[1+-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8-1]+ex[-(c7t+3)+(c9t+3)-1][1+-(c7t+3)+c8-1]-ex[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8-1]-ey[-(c7t+3)+(c9t+3)-1][-(c7t+3)+c8-1]); hz[-(c7t+4)+c9t-1][-(c7t+4)+c8-1]=hz[-(c7t+4)+c9t-1][-(c7t+4)+c8-1]-((double)(7))/10*(ey[1+-(c7t+4)+c9t-1][-(c7t+4)+c8-1]+ex[-(c7t+4)+c9t-1][1+-(c7t+4)+c8-1]-ex[-(c7t+4)+c9t-1][-(c7t+4)+c8-1]-ey[-(c7t+4)+c9t-1][-(c7t+4)+c8-1]); hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8-1]=hz[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8-1]+ex[-(c7t+4)+(c9t+1)-1][1+-(c7t+4)+c8-1]-ex[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8-1]-ey[-(c7t+4)+(c9t+1)-1][-(c7t+4)+c8-1]); hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8-1]=hz[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8-1]+ex[-(c7t+4)+(c9t+2)-1][1+-(c7t+4)+c8-1]-ex[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8-1]-ey[-(c7t+4)+(c9t+2)-1][-(c7t+4)+c8-1]); hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8-1]=hz[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8-1]-((double)(7))/10*(ey[1+-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8-1]+ex[-(c7t+4)+(c9t+3)-1][1+-(c7t+4)+c8-1]-ex[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8-1]-ey[-(c7t+4)+(c9t+3)-1][-(c7t+4)+c8-1]); hz[-(c7t+5)+c9t-1][-(c7t+5)+c8-1]=hz[-(c7t+5)+c9t-1][-(c7t+5)+c8-1]-((double)(7))/10*(ey[1+-(c7t+5)+c9t-1][-(c7t+5)+c8-1]+ex[-(c7t+5)+c9t-1][1+-(c7t+5)+c8-1]-ex[-(c7t+5)+c9t-1][-(c7t+5)+c8-1]-ey[-(c7t+5)+c9t-1][-(c7t+5)+c8-1]); hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8-1]=hz[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8-1]+ex[-(c7t+5)+(c9t+1)-1][1+-(c7t+5)+c8-1]-ex[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8-1]-ey[-(c7t+5)+(c9t+1)-1][-(c7t+5)+c8-1]); hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8-1]=hz[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8-1]+ex[-(c7t+5)+(c9t+2)-1][1+-(c7t+5)+c8-1]-ex[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8-1]-ey[-(c7t+5)+(c9t+2)-1][-(c7t+5)+c8-1]); 
hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8-1]=hz[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8-1]-((double)(7))/10*(ey[1+-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8-1]+ex[-(c7t+5)+(c9t+3)-1][1+-(c7t+5)+c8-1]-ex[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8-1]-ey[-(c7t+5)+(c9t+3)-1][-(c7t+5)+c8-1]); hz[-(c7t+6)+c9t-1][-(c7t+6)+c8-1]=hz[-(c7t+6)+c9t-1][-(c7t+6)+c8-1]-((double)(7))/10*(ey[1+-(c7t+6)+c9t-1][-(c7t+6)+c8-1]+ex[-(c7t+6)+c9t-1][1+-(c7t+6)+c8-1]-ex[-(c7t+6)+c9t-1][-(c7t+6)+c8-1]-ey[-(c7t+6)+c9t-1][-(c7t+6)+c8-1]); hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8-1]=hz[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8-1]+ex[-(c7t+6)+(c9t+1)-1][1+-(c7t+6)+c8-1]-ex[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8-1]-ey[-(c7t+6)+(c9t+1)-1][-(c7t+6)+c8-1]); hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8-1]=hz[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8-1]+ex[-(c7t+6)+(c9t+2)-1][1+-(c7t+6)+c8-1]-ex[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8-1]-ey[-(c7t+6)+(c9t+2)-1][-(c7t+6)+c8-1]); hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8-1]=hz[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8-1]-((double)(7))/10*(ey[1+-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8-1]+ex[-(c7t+6)+(c9t+3)-1][1+-(c7t+6)+c8-1]-ex[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8-1]-ey[-(c7t+6)+(c9t+3)-1][-(c7t+6)+c8-1]); hz[-(c7t+7)+c9t-1][-(c7t+7)+c8-1]=hz[-(c7t+7)+c9t-1][-(c7t+7)+c8-1]-((double)(7))/10*(ey[1+-(c7t+7)+c9t-1][-(c7t+7)+c8-1]+ex[-(c7t+7)+c9t-1][1+-(c7t+7)+c8-1]-ex[-(c7t+7)+c9t-1][-(c7t+7)+c8-1]-ey[-(c7t+7)+c9t-1][-(c7t+7)+c8-1]); hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8-1]=hz[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8-1]+ex[-(c7t+7)+(c9t+1)-1][1+-(c7t+7)+c8-1]-ex[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8-1]-ey[-(c7t+7)+(c9t+1)-1][-(c7t+7)+c8-1]); hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8-1]=hz[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8-1]+ex[-(c7t+7)+(c9t+2)-1][1+-(c7t+7)+c8-1]-ex[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8-1]-ey[-(c7t+7)+(c9t+2)-1][-(c7t+7)+c8-1]); 
hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8-1]=hz[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8-1]-((double)(7))/10*(ey[1+-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8-1]+ex[-(c7t+7)+(c9t+3)-1][1+-(c7t+7)+c8-1]-ex[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8-1]-ey[-(c7t+7)+(c9t+3)-1][-(c7t+7)+c8-1]); } } for (c9=c9t; c9<=32*c3+31; c9=c9+1) { register int cbv_4, cbv_5; cbv_4=32*c2; cbv_5=32*c2+30; #pragma ivdep #pragma vector always for (c8t=cbv_4; c8t<=cbv_5; c8t=c8t+2) { ey[-c7t+c9][-c7t+c8t]=ey[-c7t+c9][-c7t+c8t]-((double)(1))/2*(hz[-c7t+c9][-c7t+c8t]-hz[-c7t+c9-1][-c7t+c8t]); ey[-c7t+c9][-c7t+(c8t+1)]=ey[-c7t+c9][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+c9][-c7t+(c8t+1)]-hz[-c7t+c9-1][-c7t+(c8t+1)]); ey[-(c7t+1)+c9][-(c7t+1)+c8t]=ey[-(c7t+1)+c9][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+c9][-(c7t+1)+c8t]-hz[-(c7t+1)+c9-1][-(c7t+1)+c8t]); ey[-(c7t+1)+c9][-(c7t+1)+(c8t+1)]=ey[-(c7t+1)+c9][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+c9][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+c9-1][-(c7t+1)+(c8t+1)]); ey[-(c7t+2)+c9][-(c7t+2)+c8t]=ey[-(c7t+2)+c9][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+c9][-(c7t+2)+c8t]-hz[-(c7t+2)+c9-1][-(c7t+2)+c8t]); ey[-(c7t+2)+c9][-(c7t+2)+(c8t+1)]=ey[-(c7t+2)+c9][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+c9][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+c9-1][-(c7t+2)+(c8t+1)]); ey[-(c7t+3)+c9][-(c7t+3)+c8t]=ey[-(c7t+3)+c9][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+c9][-(c7t+3)+c8t]-hz[-(c7t+3)+c9-1][-(c7t+3)+c8t]); ey[-(c7t+3)+c9][-(c7t+3)+(c8t+1)]=ey[-(c7t+3)+c9][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+c9][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+c9-1][-(c7t+3)+(c8t+1)]); ey[-(c7t+4)+c9][-(c7t+4)+c8t]=ey[-(c7t+4)+c9][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+c9][-(c7t+4)+c8t]-hz[-(c7t+4)+c9-1][-(c7t+4)+c8t]); ey[-(c7t+4)+c9][-(c7t+4)+(c8t+1)]=ey[-(c7t+4)+c9][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+c9][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+c9-1][-(c7t+4)+(c8t+1)]); ey[-(c7t+5)+c9][-(c7t+5)+c8t]=ey[-(c7t+5)+c9][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+c9][-(c7t+5)+c8t]-hz[-(c7t+5)+c9-1][-(c7t+5)+c8t]); 
ey[-(c7t+5)+c9][-(c7t+5)+(c8t+1)]=ey[-(c7t+5)+c9][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+c9][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+c9-1][-(c7t+5)+(c8t+1)]); ey[-(c7t+6)+c9][-(c7t+6)+c8t]=ey[-(c7t+6)+c9][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+c9][-(c7t+6)+c8t]-hz[-(c7t+6)+c9-1][-(c7t+6)+c8t]); ey[-(c7t+6)+c9][-(c7t+6)+(c8t+1)]=ey[-(c7t+6)+c9][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+c9][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+c9-1][-(c7t+6)+(c8t+1)]); ey[-(c7t+7)+c9][-(c7t+7)+c8t]=ey[-(c7t+7)+c9][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+c9][-(c7t+7)+c8t]-hz[-(c7t+7)+c9-1][-(c7t+7)+c8t]); ey[-(c7t+7)+c9][-(c7t+7)+(c8t+1)]=ey[-(c7t+7)+c9][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+c9][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+c9-1][-(c7t+7)+(c8t+1)]); ex[-c7t+c9][-c7t+c8t]=ex[-c7t+c9][-c7t+c8t]-((double)(1))/2*(hz[-c7t+c9][-c7t+c8t]-hz[-c7t+c9][-c7t+c8t-1]); ex[-c7t+c9][-c7t+(c8t+1)]=ex[-c7t+c9][-c7t+(c8t+1)]-((double)(1))/2*(hz[-c7t+c9][-c7t+(c8t+1)]-hz[-c7t+c9][-c7t+(c8t+1)-1]); ex[-(c7t+1)+c9][-(c7t+1)+c8t]=ex[-(c7t+1)+c9][-(c7t+1)+c8t]-((double)(1))/2*(hz[-(c7t+1)+c9][-(c7t+1)+c8t]-hz[-(c7t+1)+c9][-(c7t+1)+c8t-1]); ex[-(c7t+1)+c9][-(c7t+1)+(c8t+1)]=ex[-(c7t+1)+c9][-(c7t+1)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+1)+c9][-(c7t+1)+(c8t+1)]-hz[-(c7t+1)+c9][-(c7t+1)+(c8t+1)-1]); ex[-(c7t+2)+c9][-(c7t+2)+c8t]=ex[-(c7t+2)+c9][-(c7t+2)+c8t]-((double)(1))/2*(hz[-(c7t+2)+c9][-(c7t+2)+c8t]-hz[-(c7t+2)+c9][-(c7t+2)+c8t-1]); ex[-(c7t+2)+c9][-(c7t+2)+(c8t+1)]=ex[-(c7t+2)+c9][-(c7t+2)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+2)+c9][-(c7t+2)+(c8t+1)]-hz[-(c7t+2)+c9][-(c7t+2)+(c8t+1)-1]); ex[-(c7t+3)+c9][-(c7t+3)+c8t]=ex[-(c7t+3)+c9][-(c7t+3)+c8t]-((double)(1))/2*(hz[-(c7t+3)+c9][-(c7t+3)+c8t]-hz[-(c7t+3)+c9][-(c7t+3)+c8t-1]); ex[-(c7t+3)+c9][-(c7t+3)+(c8t+1)]=ex[-(c7t+3)+c9][-(c7t+3)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+3)+c9][-(c7t+3)+(c8t+1)]-hz[-(c7t+3)+c9][-(c7t+3)+(c8t+1)-1]); 
ex[-(c7t+4)+c9][-(c7t+4)+c8t]=ex[-(c7t+4)+c9][-(c7t+4)+c8t]-((double)(1))/2*(hz[-(c7t+4)+c9][-(c7t+4)+c8t]-hz[-(c7t+4)+c9][-(c7t+4)+c8t-1]); ex[-(c7t+4)+c9][-(c7t+4)+(c8t+1)]=ex[-(c7t+4)+c9][-(c7t+4)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+4)+c9][-(c7t+4)+(c8t+1)]-hz[-(c7t+4)+c9][-(c7t+4)+(c8t+1)-1]); ex[-(c7t+5)+c9][-(c7t+5)+c8t]=ex[-(c7t+5)+c9][-(c7t+5)+c8t]-((double)(1))/2*(hz[-(c7t+5)+c9][-(c7t+5)+c8t]-hz[-(c7t+5)+c9][-(c7t+5)+c8t-1]); ex[-(c7t+5)+c9][-(c7t+5)+(c8t+1)]=ex[-(c7t+5)+c9][-(c7t+5)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+5)+c9][-(c7t+5)+(c8t+1)]-hz[-(c7t+5)+c9][-(c7t+5)+(c8t+1)-1]); ex[-(c7t+6)+c9][-(c7t+6)+c8t]=ex[-(c7t+6)+c9][-(c7t+6)+c8t]-((double)(1))/2*(hz[-(c7t+6)+c9][-(c7t+6)+c8t]-hz[-(c7t+6)+c9][-(c7t+6)+c8t-1]); ex[-(c7t+6)+c9][-(c7t+6)+(c8t+1)]=ex[-(c7t+6)+c9][-(c7t+6)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+6)+c9][-(c7t+6)+(c8t+1)]-hz[-(c7t+6)+c9][-(c7t+6)+(c8t+1)-1]); ex[-(c7t+7)+c9][-(c7t+7)+c8t]=ex[-(c7t+7)+c9][-(c7t+7)+c8t]-((double)(1))/2*(hz[-(c7t+7)+c9][-(c7t+7)+c8t]-hz[-(c7t+7)+c9][-(c7t+7)+c8t-1]); ex[-(c7t+7)+c9][-(c7t+7)+(c8t+1)]=ex[-(c7t+7)+c9][-(c7t+7)+(c8t+1)]-((double)(1))/2*(hz[-(c7t+7)+c9][-(c7t+7)+(c8t+1)]-hz[-(c7t+7)+c9][-(c7t+7)+(c8t+1)-1]); hz[-c7t+c9-1][-c7t+c8t-1]=hz[-c7t+c9-1][-c7t+c8t-1]-((double)(7))/10*(ey[1+-c7t+c9-1][-c7t+c8t-1]+ex[-c7t+c9-1][1+-c7t+c8t-1]-ex[-c7t+c9-1][-c7t+c8t-1]-ey[-c7t+c9-1][-c7t+c8t-1]); hz[-c7t+c9-1][-c7t+(c8t+1)-1]=hz[-c7t+c9-1][-c7t+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7t+c9-1][-c7t+(c8t+1)-1]+ex[-c7t+c9-1][1+-c7t+(c8t+1)-1]-ex[-c7t+c9-1][-c7t+(c8t+1)-1]-ey[-c7t+c9-1][-c7t+(c8t+1)-1]); hz[-(c7t+1)+c9-1][-(c7t+1)+c8t-1]=hz[-(c7t+1)+c9-1][-(c7t+1)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+1)+c9-1][-(c7t+1)+c8t-1]+ex[-(c7t+1)+c9-1][1+-(c7t+1)+c8t-1]-ex[-(c7t+1)+c9-1][-(c7t+1)+c8t-1]-ey[-(c7t+1)+c9-1][-(c7t+1)+c8t-1]); 
hz[-(c7t+1)+c9-1][-(c7t+1)+(c8t+1)-1]=hz[-(c7t+1)+c9-1][-(c7t+1)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+1)+c9-1][-(c7t+1)+(c8t+1)-1]+ex[-(c7t+1)+c9-1][1+-(c7t+1)+(c8t+1)-1]-ex[-(c7t+1)+c9-1][-(c7t+1)+(c8t+1)-1]-ey[-(c7t+1)+c9-1][-(c7t+1)+(c8t+1)-1]); hz[-(c7t+2)+c9-1][-(c7t+2)+c8t-1]=hz[-(c7t+2)+c9-1][-(c7t+2)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+2)+c9-1][-(c7t+2)+c8t-1]+ex[-(c7t+2)+c9-1][1+-(c7t+2)+c8t-1]-ex[-(c7t+2)+c9-1][-(c7t+2)+c8t-1]-ey[-(c7t+2)+c9-1][-(c7t+2)+c8t-1]); hz[-(c7t+2)+c9-1][-(c7t+2)+(c8t+1)-1]=hz[-(c7t+2)+c9-1][-(c7t+2)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+2)+c9-1][-(c7t+2)+(c8t+1)-1]+ex[-(c7t+2)+c9-1][1+-(c7t+2)+(c8t+1)-1]-ex[-(c7t+2)+c9-1][-(c7t+2)+(c8t+1)-1]-ey[-(c7t+2)+c9-1][-(c7t+2)+(c8t+1)-1]); hz[-(c7t+3)+c9-1][-(c7t+3)+c8t-1]=hz[-(c7t+3)+c9-1][-(c7t+3)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+3)+c9-1][-(c7t+3)+c8t-1]+ex[-(c7t+3)+c9-1][1+-(c7t+3)+c8t-1]-ex[-(c7t+3)+c9-1][-(c7t+3)+c8t-1]-ey[-(c7t+3)+c9-1][-(c7t+3)+c8t-1]); hz[-(c7t+3)+c9-1][-(c7t+3)+(c8t+1)-1]=hz[-(c7t+3)+c9-1][-(c7t+3)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+3)+c9-1][-(c7t+3)+(c8t+1)-1]+ex[-(c7t+3)+c9-1][1+-(c7t+3)+(c8t+1)-1]-ex[-(c7t+3)+c9-1][-(c7t+3)+(c8t+1)-1]-ey[-(c7t+3)+c9-1][-(c7t+3)+(c8t+1)-1]); hz[-(c7t+4)+c9-1][-(c7t+4)+c8t-1]=hz[-(c7t+4)+c9-1][-(c7t+4)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+4)+c9-1][-(c7t+4)+c8t-1]+ex[-(c7t+4)+c9-1][1+-(c7t+4)+c8t-1]-ex[-(c7t+4)+c9-1][-(c7t+4)+c8t-1]-ey[-(c7t+4)+c9-1][-(c7t+4)+c8t-1]); hz[-(c7t+4)+c9-1][-(c7t+4)+(c8t+1)-1]=hz[-(c7t+4)+c9-1][-(c7t+4)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+4)+c9-1][-(c7t+4)+(c8t+1)-1]+ex[-(c7t+4)+c9-1][1+-(c7t+4)+(c8t+1)-1]-ex[-(c7t+4)+c9-1][-(c7t+4)+(c8t+1)-1]-ey[-(c7t+4)+c9-1][-(c7t+4)+(c8t+1)-1]); hz[-(c7t+5)+c9-1][-(c7t+5)+c8t-1]=hz[-(c7t+5)+c9-1][-(c7t+5)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+5)+c9-1][-(c7t+5)+c8t-1]+ex[-(c7t+5)+c9-1][1+-(c7t+5)+c8t-1]-ex[-(c7t+5)+c9-1][-(c7t+5)+c8t-1]-ey[-(c7t+5)+c9-1][-(c7t+5)+c8t-1]); 
hz[-(c7t+5)+c9-1][-(c7t+5)+(c8t+1)-1]=hz[-(c7t+5)+c9-1][-(c7t+5)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+5)+c9-1][-(c7t+5)+(c8t+1)-1]+ex[-(c7t+5)+c9-1][1+-(c7t+5)+(c8t+1)-1]-ex[-(c7t+5)+c9-1][-(c7t+5)+(c8t+1)-1]-ey[-(c7t+5)+c9-1][-(c7t+5)+(c8t+1)-1]); hz[-(c7t+6)+c9-1][-(c7t+6)+c8t-1]=hz[-(c7t+6)+c9-1][-(c7t+6)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+6)+c9-1][-(c7t+6)+c8t-1]+ex[-(c7t+6)+c9-1][1+-(c7t+6)+c8t-1]-ex[-(c7t+6)+c9-1][-(c7t+6)+c8t-1]-ey[-(c7t+6)+c9-1][-(c7t+6)+c8t-1]); hz[-(c7t+6)+c9-1][-(c7t+6)+(c8t+1)-1]=hz[-(c7t+6)+c9-1][-(c7t+6)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+6)+c9-1][-(c7t+6)+(c8t+1)-1]+ex[-(c7t+6)+c9-1][1+-(c7t+6)+(c8t+1)-1]-ex[-(c7t+6)+c9-1][-(c7t+6)+(c8t+1)-1]-ey[-(c7t+6)+c9-1][-(c7t+6)+(c8t+1)-1]); hz[-(c7t+7)+c9-1][-(c7t+7)+c8t-1]=hz[-(c7t+7)+c9-1][-(c7t+7)+c8t-1]-((double)(7))/10*(ey[1+-(c7t+7)+c9-1][-(c7t+7)+c8t-1]+ex[-(c7t+7)+c9-1][1+-(c7t+7)+c8t-1]-ex[-(c7t+7)+c9-1][-(c7t+7)+c8t-1]-ey[-(c7t+7)+c9-1][-(c7t+7)+c8t-1]); hz[-(c7t+7)+c9-1][-(c7t+7)+(c8t+1)-1]=hz[-(c7t+7)+c9-1][-(c7t+7)+(c8t+1)-1]-((double)(7))/10*(ey[1+-(c7t+7)+c9-1][-(c7t+7)+(c8t+1)-1]+ex[-(c7t+7)+c9-1][1+-(c7t+7)+(c8t+1)-1]-ex[-(c7t+7)+c9-1][-(c7t+7)+(c8t+1)-1]-ey[-(c7t+7)+c9-1][-(c7t+7)+(c8t+1)-1]); } register int cbv_6; cbv_6=32*c2+31; #pragma ivdep #pragma vector always for (c8=c8t; c8<=cbv_6; c8=c8+1) { ey[-c7t+c9][-c7t+c8]=ey[-c7t+c9][-c7t+c8]-((double)(1))/2*(hz[-c7t+c9][-c7t+c8]-hz[-c7t+c9-1][-c7t+c8]); ey[-(c7t+1)+c9][-(c7t+1)+c8]=ey[-(c7t+1)+c9][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+c9][-(c7t+1)+c8]-hz[-(c7t+1)+c9-1][-(c7t+1)+c8]); ey[-(c7t+2)+c9][-(c7t+2)+c8]=ey[-(c7t+2)+c9][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+c9][-(c7t+2)+c8]-hz[-(c7t+2)+c9-1][-(c7t+2)+c8]); ey[-(c7t+3)+c9][-(c7t+3)+c8]=ey[-(c7t+3)+c9][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+c9][-(c7t+3)+c8]-hz[-(c7t+3)+c9-1][-(c7t+3)+c8]); ey[-(c7t+4)+c9][-(c7t+4)+c8]=ey[-(c7t+4)+c9][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+c9][-(c7t+4)+c8]-hz[-(c7t+4)+c9-1][-(c7t+4)+c8]); 
ey[-(c7t+5)+c9][-(c7t+5)+c8]=ey[-(c7t+5)+c9][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+c9][-(c7t+5)+c8]-hz[-(c7t+5)+c9-1][-(c7t+5)+c8]); ey[-(c7t+6)+c9][-(c7t+6)+c8]=ey[-(c7t+6)+c9][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+c9][-(c7t+6)+c8]-hz[-(c7t+6)+c9-1][-(c7t+6)+c8]); ey[-(c7t+7)+c9][-(c7t+7)+c8]=ey[-(c7t+7)+c9][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+c9][-(c7t+7)+c8]-hz[-(c7t+7)+c9-1][-(c7t+7)+c8]); ex[-c7t+c9][-c7t+c8]=ex[-c7t+c9][-c7t+c8]-((double)(1))/2*(hz[-c7t+c9][-c7t+c8]-hz[-c7t+c9][-c7t+c8-1]); ex[-(c7t+1)+c9][-(c7t+1)+c8]=ex[-(c7t+1)+c9][-(c7t+1)+c8]-((double)(1))/2*(hz[-(c7t+1)+c9][-(c7t+1)+c8]-hz[-(c7t+1)+c9][-(c7t+1)+c8-1]); ex[-(c7t+2)+c9][-(c7t+2)+c8]=ex[-(c7t+2)+c9][-(c7t+2)+c8]-((double)(1))/2*(hz[-(c7t+2)+c9][-(c7t+2)+c8]-hz[-(c7t+2)+c9][-(c7t+2)+c8-1]); ex[-(c7t+3)+c9][-(c7t+3)+c8]=ex[-(c7t+3)+c9][-(c7t+3)+c8]-((double)(1))/2*(hz[-(c7t+3)+c9][-(c7t+3)+c8]-hz[-(c7t+3)+c9][-(c7t+3)+c8-1]); ex[-(c7t+4)+c9][-(c7t+4)+c8]=ex[-(c7t+4)+c9][-(c7t+4)+c8]-((double)(1))/2*(hz[-(c7t+4)+c9][-(c7t+4)+c8]-hz[-(c7t+4)+c9][-(c7t+4)+c8-1]); ex[-(c7t+5)+c9][-(c7t+5)+c8]=ex[-(c7t+5)+c9][-(c7t+5)+c8]-((double)(1))/2*(hz[-(c7t+5)+c9][-(c7t+5)+c8]-hz[-(c7t+5)+c9][-(c7t+5)+c8-1]); ex[-(c7t+6)+c9][-(c7t+6)+c8]=ex[-(c7t+6)+c9][-(c7t+6)+c8]-((double)(1))/2*(hz[-(c7t+6)+c9][-(c7t+6)+c8]-hz[-(c7t+6)+c9][-(c7t+6)+c8-1]); ex[-(c7t+7)+c9][-(c7t+7)+c8]=ex[-(c7t+7)+c9][-(c7t+7)+c8]-((double)(1))/2*(hz[-(c7t+7)+c9][-(c7t+7)+c8]-hz[-(c7t+7)+c9][-(c7t+7)+c8-1]); hz[-c7t+c9-1][-c7t+c8-1]=hz[-c7t+c9-1][-c7t+c8-1]-((double)(7))/10*(ey[1+-c7t+c9-1][-c7t+c8-1]+ex[-c7t+c9-1][1+-c7t+c8-1]-ex[-c7t+c9-1][-c7t+c8-1]-ey[-c7t+c9-1][-c7t+c8-1]); hz[-(c7t+1)+c9-1][-(c7t+1)+c8-1]=hz[-(c7t+1)+c9-1][-(c7t+1)+c8-1]-((double)(7))/10*(ey[1+-(c7t+1)+c9-1][-(c7t+1)+c8-1]+ex[-(c7t+1)+c9-1][1+-(c7t+1)+c8-1]-ex[-(c7t+1)+c9-1][-(c7t+1)+c8-1]-ey[-(c7t+1)+c9-1][-(c7t+1)+c8-1]); 
hz[-(c7t+2)+c9-1][-(c7t+2)+c8-1]=hz[-(c7t+2)+c9-1][-(c7t+2)+c8-1]-((double)(7))/10*(ey[1+-(c7t+2)+c9-1][-(c7t+2)+c8-1]+ex[-(c7t+2)+c9-1][1+-(c7t+2)+c8-1]-ex[-(c7t+2)+c9-1][-(c7t+2)+c8-1]-ey[-(c7t+2)+c9-1][-(c7t+2)+c8-1]); hz[-(c7t+3)+c9-1][-(c7t+3)+c8-1]=hz[-(c7t+3)+c9-1][-(c7t+3)+c8-1]-((double)(7))/10*(ey[1+-(c7t+3)+c9-1][-(c7t+3)+c8-1]+ex[-(c7t+3)+c9-1][1+-(c7t+3)+c8-1]-ex[-(c7t+3)+c9-1][-(c7t+3)+c8-1]-ey[-(c7t+3)+c9-1][-(c7t+3)+c8-1]); hz[-(c7t+4)+c9-1][-(c7t+4)+c8-1]=hz[-(c7t+4)+c9-1][-(c7t+4)+c8-1]-((double)(7))/10*(ey[1+-(c7t+4)+c9-1][-(c7t+4)+c8-1]+ex[-(c7t+4)+c9-1][1+-(c7t+4)+c8-1]-ex[-(c7t+4)+c9-1][-(c7t+4)+c8-1]-ey[-(c7t+4)+c9-1][-(c7t+4)+c8-1]); hz[-(c7t+5)+c9-1][-(c7t+5)+c8-1]=hz[-(c7t+5)+c9-1][-(c7t+5)+c8-1]-((double)(7))/10*(ey[1+-(c7t+5)+c9-1][-(c7t+5)+c8-1]+ex[-(c7t+5)+c9-1][1+-(c7t+5)+c8-1]-ex[-(c7t+5)+c9-1][-(c7t+5)+c8-1]-ey[-(c7t+5)+c9-1][-(c7t+5)+c8-1]); hz[-(c7t+6)+c9-1][-(c7t+6)+c8-1]=hz[-(c7t+6)+c9-1][-(c7t+6)+c8-1]-((double)(7))/10*(ey[1+-(c7t+6)+c9-1][-(c7t+6)+c8-1]+ex[-(c7t+6)+c9-1][1+-(c7t+6)+c8-1]-ex[-(c7t+6)+c9-1][-(c7t+6)+c8-1]-ey[-(c7t+6)+c9-1][-(c7t+6)+c8-1]); hz[-(c7t+7)+c9-1][-(c7t+7)+c8-1]=hz[-(c7t+7)+c9-1][-(c7t+7)+c8-1]-((double)(7))/10*(ey[1+-(c7t+7)+c9-1][-(c7t+7)+c8-1]+ex[-(c7t+7)+c9-1][1+-(c7t+7)+c8-1]-ex[-(c7t+7)+c9-1][-(c7t+7)+c8-1]-ey[-(c7t+7)+c9-1][-(c7t+7)+c8-1]); } } } for (c7=c7t; c7<=min(min(min(tmax-1,32*c3-1),16*c1-16*c2+15),32*c2-1); c7=c7+1) { for (c9t=32*c3; c9t<=32*c3+28; c9t=c9t+4) { register int cbv_7, cbv_8; cbv_7=32*c2; cbv_8=32*c2+30; #pragma ivdep #pragma vector always for (c8t=cbv_7; c8t<=cbv_8; c8t=c8t+2) { ey[-c7+c9t][-c7+c8t]=ey[-c7+c9t][-c7+c8t]-((double)(1))/2*(hz[-c7+c9t][-c7+c8t]-hz[-c7+c9t-1][-c7+c8t]); ey[-c7+c9t][-c7+(c8t+1)]=ey[-c7+c9t][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+c9t][-c7+(c8t+1)]-hz[-c7+c9t-1][-c7+(c8t+1)]); ey[-c7+(c9t+1)][-c7+c8t]=ey[-c7+(c9t+1)][-c7+c8t]-((double)(1))/2*(hz[-c7+(c9t+1)][-c7+c8t]-hz[-c7+(c9t+1)-1][-c7+c8t]); 
ey[-c7+(c9t+1)][-c7+(c8t+1)]=ey[-c7+(c9t+1)][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+(c9t+1)][-c7+(c8t+1)]-hz[-c7+(c9t+1)-1][-c7+(c8t+1)]); ey[-c7+(c9t+2)][-c7+c8t]=ey[-c7+(c9t+2)][-c7+c8t]-((double)(1))/2*(hz[-c7+(c9t+2)][-c7+c8t]-hz[-c7+(c9t+2)-1][-c7+c8t]); ey[-c7+(c9t+2)][-c7+(c8t+1)]=ey[-c7+(c9t+2)][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+(c9t+2)][-c7+(c8t+1)]-hz[-c7+(c9t+2)-1][-c7+(c8t+1)]); ey[-c7+(c9t+3)][-c7+c8t]=ey[-c7+(c9t+3)][-c7+c8t]-((double)(1))/2*(hz[-c7+(c9t+3)][-c7+c8t]-hz[-c7+(c9t+3)-1][-c7+c8t]); ey[-c7+(c9t+3)][-c7+(c8t+1)]=ey[-c7+(c9t+3)][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+(c9t+3)][-c7+(c8t+1)]-hz[-c7+(c9t+3)-1][-c7+(c8t+1)]); ex[-c7+c9t][-c7+c8t]=ex[-c7+c9t][-c7+c8t]-((double)(1))/2*(hz[-c7+c9t][-c7+c8t]-hz[-c7+c9t][-c7+c8t-1]); ex[-c7+c9t][-c7+(c8t+1)]=ex[-c7+c9t][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+c9t][-c7+(c8t+1)]-hz[-c7+c9t][-c7+(c8t+1)-1]); ex[-c7+(c9t+1)][-c7+c8t]=ex[-c7+(c9t+1)][-c7+c8t]-((double)(1))/2*(hz[-c7+(c9t+1)][-c7+c8t]-hz[-c7+(c9t+1)][-c7+c8t-1]); ex[-c7+(c9t+1)][-c7+(c8t+1)]=ex[-c7+(c9t+1)][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+(c9t+1)][-c7+(c8t+1)]-hz[-c7+(c9t+1)][-c7+(c8t+1)-1]); ex[-c7+(c9t+2)][-c7+c8t]=ex[-c7+(c9t+2)][-c7+c8t]-((double)(1))/2*(hz[-c7+(c9t+2)][-c7+c8t]-hz[-c7+(c9t+2)][-c7+c8t-1]); ex[-c7+(c9t+2)][-c7+(c8t+1)]=ex[-c7+(c9t+2)][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+(c9t+2)][-c7+(c8t+1)]-hz[-c7+(c9t+2)][-c7+(c8t+1)-1]); ex[-c7+(c9t+3)][-c7+c8t]=ex[-c7+(c9t+3)][-c7+c8t]-((double)(1))/2*(hz[-c7+(c9t+3)][-c7+c8t]-hz[-c7+(c9t+3)][-c7+c8t-1]); ex[-c7+(c9t+3)][-c7+(c8t+1)]=ex[-c7+(c9t+3)][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+(c9t+3)][-c7+(c8t+1)]-hz[-c7+(c9t+3)][-c7+(c8t+1)-1]); hz[-c7+c9t-1][-c7+c8t-1]=hz[-c7+c9t-1][-c7+c8t-1]-((double)(7))/10*(ey[1+-c7+c9t-1][-c7+c8t-1]+ex[-c7+c9t-1][1+-c7+c8t-1]-ex[-c7+c9t-1][-c7+c8t-1]-ey[-c7+c9t-1][-c7+c8t-1]); 
hz[-c7+c9t-1][-c7+(c8t+1)-1]=hz[-c7+c9t-1][-c7+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7+c9t-1][-c7+(c8t+1)-1]+ex[-c7+c9t-1][1+-c7+(c8t+1)-1]-ex[-c7+c9t-1][-c7+(c8t+1)-1]-ey[-c7+c9t-1][-c7+(c8t+1)-1]); hz[-c7+(c9t+1)-1][-c7+c8t-1]=hz[-c7+(c9t+1)-1][-c7+c8t-1]-((double)(7))/10*(ey[1+-c7+(c9t+1)-1][-c7+c8t-1]+ex[-c7+(c9t+1)-1][1+-c7+c8t-1]-ex[-c7+(c9t+1)-1][-c7+c8t-1]-ey[-c7+(c9t+1)-1][-c7+c8t-1]); hz[-c7+(c9t+1)-1][-c7+(c8t+1)-1]=hz[-c7+(c9t+1)-1][-c7+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7+(c9t+1)-1][-c7+(c8t+1)-1]+ex[-c7+(c9t+1)-1][1+-c7+(c8t+1)-1]-ex[-c7+(c9t+1)-1][-c7+(c8t+1)-1]-ey[-c7+(c9t+1)-1][-c7+(c8t+1)-1]); hz[-c7+(c9t+2)-1][-c7+c8t-1]=hz[-c7+(c9t+2)-1][-c7+c8t-1]-((double)(7))/10*(ey[1+-c7+(c9t+2)-1][-c7+c8t-1]+ex[-c7+(c9t+2)-1][1+-c7+c8t-1]-ex[-c7+(c9t+2)-1][-c7+c8t-1]-ey[-c7+(c9t+2)-1][-c7+c8t-1]); hz[-c7+(c9t+2)-1][-c7+(c8t+1)-1]=hz[-c7+(c9t+2)-1][-c7+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7+(c9t+2)-1][-c7+(c8t+1)-1]+ex[-c7+(c9t+2)-1][1+-c7+(c8t+1)-1]-ex[-c7+(c9t+2)-1][-c7+(c8t+1)-1]-ey[-c7+(c9t+2)-1][-c7+(c8t+1)-1]); hz[-c7+(c9t+3)-1][-c7+c8t-1]=hz[-c7+(c9t+3)-1][-c7+c8t-1]-((double)(7))/10*(ey[1+-c7+(c9t+3)-1][-c7+c8t-1]+ex[-c7+(c9t+3)-1][1+-c7+c8t-1]-ex[-c7+(c9t+3)-1][-c7+c8t-1]-ey[-c7+(c9t+3)-1][-c7+c8t-1]); hz[-c7+(c9t+3)-1][-c7+(c8t+1)-1]=hz[-c7+(c9t+3)-1][-c7+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7+(c9t+3)-1][-c7+(c8t+1)-1]+ex[-c7+(c9t+3)-1][1+-c7+(c8t+1)-1]-ex[-c7+(c9t+3)-1][-c7+(c8t+1)-1]-ey[-c7+(c9t+3)-1][-c7+(c8t+1)-1]); } register int cbv_9; cbv_9=32*c2+31; #pragma ivdep #pragma vector always for (c8=c8t; c8<=cbv_9; c8=c8+1) { ey[-c7+c9t][-c7+c8]=ey[-c7+c9t][-c7+c8]-((double)(1))/2*(hz[-c7+c9t][-c7+c8]-hz[-c7+c9t-1][-c7+c8]); ey[-c7+(c9t+1)][-c7+c8]=ey[-c7+(c9t+1)][-c7+c8]-((double)(1))/2*(hz[-c7+(c9t+1)][-c7+c8]-hz[-c7+(c9t+1)-1][-c7+c8]); ey[-c7+(c9t+2)][-c7+c8]=ey[-c7+(c9t+2)][-c7+c8]-((double)(1))/2*(hz[-c7+(c9t+2)][-c7+c8]-hz[-c7+(c9t+2)-1][-c7+c8]); 
ey[-c7+(c9t+3)][-c7+c8]=ey[-c7+(c9t+3)][-c7+c8]-((double)(1))/2*(hz[-c7+(c9t+3)][-c7+c8]-hz[-c7+(c9t+3)-1][-c7+c8]); ex[-c7+c9t][-c7+c8]=ex[-c7+c9t][-c7+c8]-((double)(1))/2*(hz[-c7+c9t][-c7+c8]-hz[-c7+c9t][-c7+c8-1]); ex[-c7+(c9t+1)][-c7+c8]=ex[-c7+(c9t+1)][-c7+c8]-((double)(1))/2*(hz[-c7+(c9t+1)][-c7+c8]-hz[-c7+(c9t+1)][-c7+c8-1]); ex[-c7+(c9t+2)][-c7+c8]=ex[-c7+(c9t+2)][-c7+c8]-((double)(1))/2*(hz[-c7+(c9t+2)][-c7+c8]-hz[-c7+(c9t+2)][-c7+c8-1]); ex[-c7+(c9t+3)][-c7+c8]=ex[-c7+(c9t+3)][-c7+c8]-((double)(1))/2*(hz[-c7+(c9t+3)][-c7+c8]-hz[-c7+(c9t+3)][-c7+c8-1]); hz[-c7+c9t-1][-c7+c8-1]=hz[-c7+c9t-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9t-1][-c7+c8-1]+ex[-c7+c9t-1][1+-c7+c8-1]-ex[-c7+c9t-1][-c7+c8-1]-ey[-c7+c9t-1][-c7+c8-1]); hz[-c7+(c9t+1)-1][-c7+c8-1]=hz[-c7+(c9t+1)-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+(c9t+1)-1][-c7+c8-1]+ex[-c7+(c9t+1)-1][1+-c7+c8-1]-ex[-c7+(c9t+1)-1][-c7+c8-1]-ey[-c7+(c9t+1)-1][-c7+c8-1]); hz[-c7+(c9t+2)-1][-c7+c8-1]=hz[-c7+(c9t+2)-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+(c9t+2)-1][-c7+c8-1]+ex[-c7+(c9t+2)-1][1+-c7+c8-1]-ex[-c7+(c9t+2)-1][-c7+c8-1]-ey[-c7+(c9t+2)-1][-c7+c8-1]); hz[-c7+(c9t+3)-1][-c7+c8-1]=hz[-c7+(c9t+3)-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+(c9t+3)-1][-c7+c8-1]+ex[-c7+(c9t+3)-1][1+-c7+c8-1]-ex[-c7+(c9t+3)-1][-c7+c8-1]-ey[-c7+(c9t+3)-1][-c7+c8-1]); } } for (c9=c9t; c9<=32*c3+31; c9=c9+1) { register int cbv_10, cbv_11; cbv_10=32*c2; cbv_11=32*c2+30; #pragma ivdep #pragma vector always for (c8t=cbv_10; c8t<=cbv_11; c8t=c8t+2) { ey[-c7+c9][-c7+c8t]=ey[-c7+c9][-c7+c8t]-((double)(1))/2*(hz[-c7+c9][-c7+c8t]-hz[-c7+c9-1][-c7+c8t]); ey[-c7+c9][-c7+(c8t+1)]=ey[-c7+c9][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+c9][-c7+(c8t+1)]-hz[-c7+c9-1][-c7+(c8t+1)]); ex[-c7+c9][-c7+c8t]=ex[-c7+c9][-c7+c8t]-((double)(1))/2*(hz[-c7+c9][-c7+c8t]-hz[-c7+c9][-c7+c8t-1]); ex[-c7+c9][-c7+(c8t+1)]=ex[-c7+c9][-c7+(c8t+1)]-((double)(1))/2*(hz[-c7+c9][-c7+(c8t+1)]-hz[-c7+c9][-c7+(c8t+1)-1]); 
hz[-c7+c9-1][-c7+c8t-1]=hz[-c7+c9-1][-c7+c8t-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8t-1]+ex[-c7+c9-1][1+-c7+c8t-1]-ex[-c7+c9-1][-c7+c8t-1]-ey[-c7+c9-1][-c7+c8t-1]); hz[-c7+c9-1][-c7+(c8t+1)-1]=hz[-c7+c9-1][-c7+(c8t+1)-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+(c8t+1)-1]+ex[-c7+c9-1][1+-c7+(c8t+1)-1]-ex[-c7+c9-1][-c7+(c8t+1)-1]-ey[-c7+c9-1][-c7+(c8t+1)-1]); } register int cbv_12; cbv_12=32*c2+31; #pragma ivdep #pragma vector always for (c8=c8t; c8<=cbv_12; c8=c8+1) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } } } /*@ end @*/ for (c7=max(max(max(max(32*c2,32*c3-nx+32),0),16*c1-16*c2),32*c2-ny+32);c7<=min(min(min(tmax-1,32*c3-1),16*c1-16*c2+15),32*c2+30);c7++) { for (c9=32*c3;c9<=32*c3+31;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=32*c2+31;c8++) { for (c9=32*c3;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } } if (ny == 1) { for (c7=max(max(max(0,32*c3-nx+1),16*c1-16*c2),32*c2);c7<=min(min(min(32*c3-1,tmax-1),16*c1-16*c2+15),32*c2+30);c7++) { for (c9=32*c3;c9<=min(c7+nx-1,32*c3+31);c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c9=32*c3;c9<=min(c7+nx,32*c3+31);c9++) { hz[-c7+c9-1][0]=hz[-c7+c9-1][0]-((double)(7))/10*(ey[1+-c7+c9-1][0]+ex[-c7+c9-1][1+0]-ex[-c7+c9-1][0]-ey[-c7+c9-1][0]); } } } if ((nx == 1) && (ny >= 2)) { for 
(c7=max(max(max(16*c3+16,32*c2-ny+1),32*c3),16*c1-16*c2);c7<=min(min(min(tmax-1,32*c2-ny+31),32*c3+30),16*c1-16*c2+15);c7++) { for (c8=max(c7+1,32*c2);c8<=c7+ny-1;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]); } hz[0][ny-1]=hz[0][ny-1]-((double)(7))/10*(ey[1+0][ny-1]+ex[0][1+ny-1]-ex[0][ny-1]-ey[0][ny-1]); } } if (nx == 1) { for (c7=max(max(max(16*c3+16,32*c3),16*c1-16*c2),32*c2-ny+32);c7<=min(min(min(tmax-1,16*c1-16*c2+15),32*c2+30),32*c3+30);c7++) { for (c8=max(c7+1,32*c2);c8<=32*c2+31;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]); } } } if ((nx == 1) && (ny == 1)) { for (c7=max(max(max(16*c3+16,16*c1-16*c2),32*c3),32*c2-1);c7<=min(min(min(tmax-1,32*c2+30),16*c1-16*c2+15),32*c3+30);c7++) { hz[0][0]=hz[0][0]-((double)(7))/10*(ey[1+0][0]+ex[0][1+0]-ex[0][0]-ey[0][0]); } } if ((c1 == c2+c3) && (nx >= 2)) { for (c7=max(max(32*c3,0),32*c2-ny+1);c7<=min(min(min(min(32*c2-1,32*c3-nx+31),tmax-1),16*c3+15),32*c2-ny+31);c7++) { for (c8=32*c2;c8<=c7+ny-1;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } for (c9=c7+1;c9<=c7+nx;c9++) { 
hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } if ((c1 == c2+c3) && (nx >= 2) && (ny >= 2)) { for (c7=max(max(32*c2,32*c3),0);c7<=min(min(min(32*c3-nx+31,tmax-1),16*c3+15),32*c2-ny+31);c7++) { ey[0][0]=c7; for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=c7+ny-1;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } for (c9=c7+1;c9<=c7+nx;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } if ((c1 == c2+c3) && (nx >= 2)) { for (c7=max(max(32*c3,0),32*c2-ny+32);c7<=min(min(min(tmax-1,16*c3+15),32*c2-1),32*c3-nx+31);c7++) { for (c8=32*c2;c8<=32*c2+31;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } 
hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } } } if (c1 == c2+c3) { for (c7=max(max(max(32*c3-nx+32,32*c3),0),32*c2-ny+1);c7<=min(min(min(32*c2-1,tmax-1),16*c3+15),32*c2-ny+31);c7++) { for (c8=32*c2;c8<=c7+ny-1;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } for (c9=c7+1;c9<=32*c3+31;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } if ((c1 == c2+c3) && (nx >= 2)) { for (c7=max(max(max(32*c2,32*c3),0),32*c2-ny+32);c7<=min(min(min(tmax-1,16*c3+15),32*c2+30),32*c3-nx+31);c7++) { ey[0][0]=c7; for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=32*c2+31;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } } } if ((c1 == c2+c3) && (ny >= 2)) { for 
(c7=max(max(max(32*c2,32*c3-nx+32),32*c3),0);c7<=min(min(tmax-1,16*c3+15),32*c2-ny+31);c7++) { ey[0][0]=c7; for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=c7+ny-1;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } for (c9=c7+1;c9<=32*c3+31;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } if (c1 == c2+c3) { for (c7=max(max(max(32*c3,0),32*c3-nx+32),32*c2-ny+32);c7<=min(min(tmax-1,16*c3+15),32*c2-1);c7++) { for (c8=32*c2;c8<=32*c2+31;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } } } if (c1 == c2+c3) { for (c7=max(max(max(max(32*c2,32*c3-nx+32),32*c3),0),32*c2-ny+32);c7<=min(min(tmax-1,16*c3+15),32*c2+30);c7++) { ey[0][0]=c7; for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=32*c2+31;c8++) { ey[0][-c7+c8]=c7; ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=32*c3+31;c9++) { 
ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } } } if ((c1 == c2+c3) && (nx >= 2) && (ny == 1)) { for (c7=max(max(32*c3,0),32*c2);c7<=min(min(tmax-1,16*c3+15),32*c2+30);c7++) { ey[0][0]=c7; for (c9=c7+1;c9<=min(c7+nx-1,32*c3+31);c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c9=c7+1;c9<=min(c7+nx,32*c3+31);c9++) { hz[-c7+c9-1][0]=hz[-c7+c9-1][0]-((double)(7))/10*(ey[1+-c7+c9-1][0]+ex[-c7+c9-1][1+0]-ex[-c7+c9-1][0]-ey[-c7+c9-1][0]); } } } if ((-c1 == -c2-c3) && (c1 <= min(floord(3*c3-1,2),floord(32*c3+tmax-32,32))) && (nx == 1) && (ny == 1)) { ey[0][0]=32*c1-32*c3+31; } if ((-c1 == -c2-c3) && (c1 <= min(floord(3*c3-1,2),floord(32*c3+tmax-32,32))) && (nx == 1) && (ny >= 2)) { ey[0][0]=32*c1-32*c3+31; } if ((c1 >= 3*c2+1) && (c2 <= min(floord(tmax-32,32),c3-1)) && (ny == 1)) { for (c9=32*c3;c9<=min(32*c2+nx+30,32*c3+31);c9++) { ey[-32*c2+c9-31][0]=ey[-32*c2+c9-31][0]-((double)(1))/2*(hz[-32*c2+c9-31][0]-hz[-32*c2+c9-31 -1][0]); } } if ((c1 >= 3*c2+1) && (c2 <= min(floord(tmax-32,32),c3-1)) && (ny >= 2)) { for (c9=32*c3;c9<=min(32*c2+nx+30,32*c3+31);c9++) { ey[-32*c2+c9-31][0]=ey[-32*c2+c9-31][0]-((double)(1))/2*(hz[-32*c2+c9-31][0]-hz[-32*c2+c9-31 -1][0]); } } if (nx >= 2) { for (c7=max(max(max(16*c3+16,32*c3),16*c1-16*c2),32*c2-ny+1);c7<=min(min(min(min(32*c2-1,32*c3-nx+31),tmax-1),32*c2-ny+31),16*c1-16*c2+15);c7++) { for (c8=32*c2;c8<=c7+ny-1;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); 
ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } for (c9=c7+1;c9<=c7+nx;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } if ((nx >= 2) && (ny >= 2)) { for (c7=max(max(max(32*c2,16*c3+16),32*c3),16*c1-16*c2);c7<=min(min(min(32*c3-nx+31,tmax-1),32*c2-ny+31),16*c1-16*c2+15);c7++) { for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=c7+ny-1;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } for (c9=c7+1;c9<=c7+nx;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } if (nx >= 2) { for (c7=max(max(max(16*c3+16,16*c1-16*c2),32*c3),32*c2-ny+32);c7<=min(min(min(tmax-1,16*c1-16*c2+15),32*c2-1),32*c3-nx+31);c7++) { for (c8=32*c2;c8<=32*c2+31;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=c7+nx-1;c9++) { 
ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } } } for (c7=max(max(max(max(32*c3-nx+32,16*c3+16),32*c3),16*c1-16*c2),32*c2-ny+1);c7<=min(min(min(min(32*c2-1,tmax-1),32*c2-ny+31),16*c1-16*c2+15),32*c3+30);c7++) { for (c8=32*c2;c8<=c7+ny-1;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } for (c9=c7+1;c9<=32*c3+31;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } if (nx >= 2) { for (c7=max(max(max(max(32*c2,16*c3+16),16*c1-16*c2),32*c3),32*c2-ny+32);c7<=min(min(min(tmax-1,32*c2+30),16*c1-16*c2+15),32*c3-nx+31);c7++) { for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=32*c2+31;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=c7+nx-1;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); 
hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]); } } } if (ny >= 2) { for (c7=max(max(max(max(32*c2,32*c3-nx+32),16*c3+16),32*c3),16*c1-16*c2);c7<=min(min(min(tmax-1,32*c2-ny+31),16*c1-16*c2+15),32*c3+30);c7++) { for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=c7+ny-1;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } for (c9=c7+1;c9<=32*c3+31;c9++) { hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]); } } } for (c7=max(max(max(max(16*c3+16,16*c1-16*c2),32*c3),32*c3-nx+32),32*c2-ny+32);c7<=min(min(min(tmax-1,32*c3+30),16*c1-16*c2+15),32*c2-1);c7++) { for (c8=32*c2;c8<=32*c2+31;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } } for 
(c7=max(max(max(max(max(32*c2,32*c3-nx+32),16*c3+16),16*c1-16*c2),32*c3),32*c2-ny+32);c7<=min(min(min(tmax-1,32*c3+30),16*c1-16*c2+15),32*c2+30);c7++) { for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c8=c7+1;c8<=32*c2+31;c8++) { ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]); for (c9=c7+1;c9<=32*c3+31;c9++) { ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]); ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]); hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]); } } } if ((nx >= 2) && (ny == 1)) { for (c7=max(max(max(16*c3+16,16*c1-16*c2),32*c3),32*c2);c7<=min(min(min(tmax-1,32*c2+30),32*c3+30),16*c1-16*c2+15);c7++) { for (c9=c7+1;c9<=min(c7+nx-1,32*c3+31);c9++) { ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]); } for (c9=c7+1;c9<=min(32*c3+31,c7+nx);c9++) { hz[-c7+c9-1][0]=hz[-c7+c9-1][0]-((double)(7))/10*(ey[1+-c7+c9-1][0]+ex[-c7+c9-1][1+0]-ex[-c7+c9-1][0]-ey[-c7+c9-1][0]); } } } if ((c1 >= c2+2*c3+1) && (c2 >= ceild(32*c3+1,32)) && (c3 >= 0) && (c3 <= floord(tmax-32,32)) && (nx == 1) && (ny >= 2)) { for (c8=max(32*c3+32,32*c2);c8<=min(32*c3+ny+30,32*c2+31);c8++) { ex[0][-32*c3+c8-31]=ex[0][-32*c3+c8-31]-((double)(1))/2*(hz[0][-32*c3+c8-31]-hz[0][-32*c3+c8-31 -1]); } } if ((-c1 == -c2-c3) && (c1 <= min(floord(3*c3-1,2),floord(32*c3+tmax-32,32))) && (c1 >= ceild(64*c3-31,32)) && (nx >= 2) && (ny == 1)) { ey[0][0]=32*c1-32*c3+31; for (c9=32*c1-32*c3+32;c9<=min(32*c1-32*c3+nx+30,32*c3+31);c9++) { ey[-32*c1+32*c3+c9-31][0]=ey[-32*c1+32*c3+c9-31][0]-((double)(1))/2*(hz[-32*c1+32*c3+c9-31][0]-hz[-32*c1+32*c3+c9-31 -1][0]); } } if ((-c1 == -c2-c3) && (c1 <= min(floord(3*c3-1,2),floord(32*c3+tmax-32,32))) && (c1 >= ceild(64*c3-31,32)) && (nx >= 2) 
&& (ny >= 2)) { ey[0][0]=32*c1-32*c3+31; for (c9=32*c1-32*c3+32;c9<=min(32*c1-32*c3+nx+30,32*c3+31);c9++) { ey[-32*c1+32*c3+c9-31][0]=ey[-32*c1+32*c3+c9-31][0]-((double)(1))/2*(hz[-32*c1+32*c3+c9-31][0]-hz[-32*c1+32*c3+c9-31 -1][0]); } } if ((c1 >= 3*c2+1) && (c2 >= max(ceild(16*c3-15,32),ceild(32*c3-31,32))) && (c2 <= min(floord(tmax-32,32),floord(32*c3-1,32))) && (nx >= 2) && (ny == 1)) { for (c9=32*c2+32;c9<=min(32*c2+nx+30,32*c3+31);c9++) { ey[-32*c2+c9-31][0]=ey[-32*c2+c9-31][0]-((double)(1))/2*(hz[-32*c2+c9-31][0]-hz[-32*c2+c9-31 -1][0]); } } if ((c1 >= 3*c2+1) && (c2 <= min(floord(tmax-32,32),floord(32*c3-1,32))) && (c2 >= max(ceild(16*c3-15,32),ceild(32*c3-31,32))) && (nx >= 2) && (ny >= 2)) { for (c9=32*c2+32;c9<=min(32*c2+nx+30,32*c3+31);c9++) { ey[-32*c2+c9-31][0]=ey[-32*c2+c9-31][0]-((double)(1))/2*(hz[-32*c2+c9-31][0]-hz[-32*c2+c9-31 -1][0]); } } if ((c1 >= c2+2*c3+1) && (c2 >= ceild(32*c3+1,32)) && (c3 >= 0) && (c3 <= floord(tmax-32,32)) && (nx >= 2) && (ny >= 2)) { for (c8=max(32*c3+32,32*c2);c8<=min(32*c3+ny+30,32*c2+31);c8++) { ex[0][-32*c3+c8-31]=ex[0][-32*c3+c8-31]-((double)(1))/2*(hz[0][-32*c3+c8-31]-hz[0][-32*c3+c8-31 -1]); } } } } } /* polysyn end */ /*@ end @*/ /*@ end @*/ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; #ifndef TEST printf("%f\n", annot_t_total); #else { int i,j; for (i=0; i<nx; i++) { for (j=0; j<ny; j++) { if (j%100==0) printf("\n"); printf("%f ",hz[i][j]); } printf("\n"); } } #endif return ((int) hz[0][0]); }
parallel_for_wrapper.h
#pragma once #include <omp.h> #include <thread> #include <opencv2/opencv.hpp> /* On OpenCV 3.2.0, cv::parallel_for_ combined with lambda function is not supported (occurring below error). If version of the library is greater than 3.2.0. "parallel_for_omp" can be replaced by cv::parallel_for_ without loss of lambda function calling. /usr/local/include/opencv2/core/utility.hpp:478:75: note: in passing argument 2 of ‘void cv::parallel_for_(const cv::Range&, const cv::ParallelLoopBody&, double)’ 478 | CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.); */ template <class BODY> void parallel_for_omp(const cv::Range& range, BODY body) { #pragma omp parallel for for (int i = range.start; i < range.end; ++i) body(cv::Range(i, i + 1)); }
GB_binop__bxor_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxor_uint8)
// A*D function (colscale):         GB (_AxD__bxor_uint8)
// D*A function (rowscale):         GB (_DxB__bxor_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__bxor_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxor_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxor_uint8)
// C=scalar+B                       GB (_bind1st__bxor_uint8)
// C=scalar+B'                      GB (_bind1st_tran__bxor_uint8)
// C=A+scalar                       GB (_bind2nd__bxor_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__bxor_uint8)

// C type:     uint8_t
// A type:     uint8_t
// A pattern?  0
// B type:     uint8_t
// B pattern?  0

// BinaryOp: cij = (aij) ^ (bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// Cx [p], the (p)th entry of the C->x value array
#define GB_CX(p) Cx [p]

// binary operator: z = x XOR y (the i,j position is unused by this operator)
#define GB_BINOP(z,x,y,i,j) \
    z = (x) ^ (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXOR || GxB_NO_UINT8 || GxB_NO_BXOR_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxor_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxor_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxor_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t  x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxor_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t  y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x) ^ (aij) ; \
}

GrB_Info GB (_bind1st_tran__bxor_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) ^ (y) ; \
}

GrB_Info GB (_bind2nd_tran__bxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif  // ifndef GBCUDA_DEV
app_main.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "bmp_interface.h" #include <omp.h> extern int __htc_get_unit_count(); extern int global_radius; int app_main(int argc, char **argv) { uint32_t bufsize = 1000; // Allocate target temp buffer. extern void *stencil_cp_alloc(size_t); uint8_t *unew = (uint8_t *)stencil_cp_alloc(bufsize * sizeof(uint8_t)); printf("unit count is %d\n", __htc_get_unit_count()); int i; #pragma omp target #pragma omp teams distribute parallel for num_teams(4) num_threads(8) for (i = 0; i < bufsize; i++) { printf("team %d thread %d i is %d\n", (int)omp_get_team_num(), (int)omp_get_thread_num(), i); unew[i] = omp_get_team_num() * omp_get_thread_num(); } int sum = 0; for (i = 0; i < bufsize; i++) { // printf("i = %d val = %d\n", i, unew[i]); sum += unew[i]; } printf("sum is %d %s\n", sum, (sum == 5124) ? "PASSED" : "FAILED"); return 0; }
tsne_inl.h
/* * * Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the Delft University of Technology. * 4. Neither the name of the Delft University of Technology nor the names of * its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. 
* */ #ifndef TSNE_INL #define TSNE_INL #include "hdi/dimensionality_reduction/tsne.h" #include "hdi/utils/math_utils.h" #include "hdi/utils/log_helper_functions.h" #include <time.h> #include <cmath> #ifdef __APPLE__ #include <dispatch/dispatch.h> #endif namespace hdi{ namespace dr{ ///////////////////////////////////////////////////////////////////////// template <typename scalar_type> TSNE<scalar_type>::InitParams::InitParams(): _perplexity(30), _seed(0), _embedding_dimensionality(2), _minimum_gain(0.1), _eta(200), _momentum(0.5), _final_momentum(0.8), _mom_switching_iter(250), _exaggeration_factor(4), _remove_exaggeration_iter(250) {} ///////////////////////////////////////////////////////////////////////// template <typename scalar_type> TSNE<scalar_type>::TSNE(): _initialized(false), _dimensionality(0), _logger(nullptr) { } template <typename scalar_type> typename TSNE<scalar_type>::data_handle_type TSNE<scalar_type>::addDataPoint(const scalar_type* ptr){ checkAndThrowLogic(!_initialized,"Class should be uninitialized to add a data-point"); checkAndThrowLogic(_dimensionality > 0,"Invalid dimensionality"); _high_dimensional_data.push_back(ptr); return static_cast<data_handle_type>(_high_dimensional_data.size() - 1); } template <typename scalar_type> void TSNE<scalar_type>::reset(){ _initialized = false; } template <typename scalar_type> void TSNE<scalar_type>::clear(){ _high_dimensional_data.clear(); _embedding->clear(); _initialized = false; } template <typename scalar_type> void TSNE<scalar_type>::getHighDimensionalDescriptor(scalar_vector_type& data_point, data_handle_type handle)const{ data_point.resize(_dimensionality); for(int i = 0; i < _dimensionality; ++i){ data_point[i] = *(_high_dimensional_data[handle]+i); } } template <typename scalar_type> void TSNE<scalar_type>::getEmbeddingPosition(scalar_vector_type& embedding_position, data_handle_type handle)const{ if(!_initialized){ throw std::logic_error("Algorithm must be initialized before "); } 
embedding_position.resize(_init_params._embedding_dimensionality); for(int i = 0; i < _init_params._embedding_dimensionality; ++i){ embedding_position[i] = _embedding->getContainer()[handle*_init_params._embedding_dimensionality + i]; } } ///////////////////////////////////////////////////////////////////////// template <typename scalar_type> void TSNE<scalar_type>::initialize(data::Embedding<scalar_type>* embedding, InitParams params){ utils::secureLog(_logger,"Initializing tSNE..."); if(size() == 0){ throw std::logic_error("Cannot initialize an empty dataset"); } { _embedding = embedding; int size_sq = size(); size_sq *= size_sq; _P.resize(size_sq); _Q.resize(size_sq); _distances_squared.resize(size_sq); _embedding->resize(params._embedding_dimensionality,size(),0); _embedding_container = &_embedding->getContainer(); _gradient.resize(size()*params._embedding_dimensionality,0); _previous_gradient.resize(size()*params._embedding_dimensionality,0); _gain.resize(size()*params._embedding_dimensionality,1); _sigmas.resize(size()); _init_params = _init_params; } //compute distances between data-points computeHighDimensionalDistances(); //Compute gaussian distributions computeGaussianDistributions(params._perplexity); //Compute High-dimensional distribution computeHighDimensionalDistribution(); //Initialize Embedding position initializeEmbeddingPosition(params._seed); _iteration = 0; _initialized = true; utils::secureLog(_logger,"Initialization complete!"); } template <typename scalar_type> void TSNE<scalar_type>::computeHighDimensionalDistances(){ utils::secureLog(_logger,"Computing High-dimensional distances..."); const int n = size(); #ifdef __APPLE__ std::cout << "GCD dispatch, tsne_inl 165.\n"; dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) { #else #pragma omp parallel for for(int j = 0; j < n; ++j){ #endif //__APPLE__ _distances_squared[j*n + j] = 0; for(int i = j+1; i < n; ++i){ scalar_type 
res(utils::euclideanDistance<scalar_type>(_high_dimensional_data[i],_high_dimensional_data[i]+_dimensionality, _high_dimensional_data[j],_high_dimensional_data[j]+_dimensionality)); //scalar_type res(utils::euclideanDistanceSquared<scalar_type>(_high_dimensional_data[i],_high_dimensional_data[i]+_dimensionality, _high_dimensional_data[j],_high_dimensional_data[j]+_dimensionality)); _distances_squared[j*n + i] = res; _distances_squared[i*n + j] = res; } } #ifdef __APPLE__ ); #endif } template <typename scalar_type> void TSNE<scalar_type>::computeGaussianDistributions(double perplexity){ utils::secureLog(_logger,"Computing gaussian distributions..."); const int n = size(); #ifdef __APPLE__ std::cout << "GCD dispatch, tsne_inl 189.\n"; dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) { #else #pragma omp parallel for for(int j = 0; j < n; ++j){ #endif //__APPLE__ const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>( _distances_squared.begin() + j*n, _distances_squared.begin() + (j + 1)*n, _P.begin() + j*n, _P.begin() + (j + 1)*n, perplexity, 200, 1e-5, j ); _P[j*n + j] = 0.; _sigmas[j] = static_cast<scalar_type>(sigma); } #ifdef __APPLE__ ); #endif } template <typename scalar_type> void TSNE<scalar_type>::computeHighDimensionalDistribution(){ utils::secureLog(_logger,"Computing high-dimensional joint probability distribution..."); const int n = size(); //#pragma omp parallel for for(int j = 0; j < n; ++j){ for(int i = j+1; i < n; ++i){ const double v = (_P[j*n + i]+_P[i*n + j])*0.5/n; _P[j*n + i] = static_cast<scalar_type>(v); _P[i*n + j] = static_cast<scalar_type>(v); } } } template <typename scalar_type> void TSNE<scalar_type>::initializeEmbeddingPosition(int seed, double multiplier){ utils::secureLog(_logger,"Initializing the embedding..."); if(seed < 0){ std::srand(static_cast<unsigned int>(time(NULL))); } else{ std::srand(seed); } for(auto& v : _embedding->getContainer()){ double x(0.); double y(0.); double 
radius(0.); do { x = 2 * (rand() / ((double)RAND_MAX + 1)) - 1; y = 2 * (rand() / ((double)RAND_MAX + 1)) - 1; radius = (x * x) + (y * y); } while((radius >= 1.0) || (radius == 0.0)); radius = sqrt(-2 * log(radius) / radius); x *= radius; y *= radius; v = static_cast<scalar_type>(x * multiplier); } } template <typename scalar_type> void TSNE<scalar_type>::doAnIteration(double mult){ if(!_initialized){ throw std::logic_error("Cannot compute a gradient descent iteration on unitialized data"); } if(_iteration == _init_params._mom_switching_iter){ utils::secureLog(_logger,"Switch to final momentum..."); } if(_iteration == _init_params._remove_exaggeration_iter){ utils::secureLog(_logger,"Remove exaggeration..."); } //Compute Low-dimensional distribution computeLowDimensionalDistribution(); //Compute gradient of the KL function computeGradient((_iteration<_init_params._remove_exaggeration_iter)?_init_params._exaggeration_factor:1.); //Compute gradient of the KL function updateTheEmbedding(mult); } template <typename scalar_type> void TSNE<scalar_type>::computeLowDimensionalDistribution(){ const int n = size(); #ifdef __APPLE__ std::cout << "GCD dispatch, tsne_inl 283.\n"; dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) { #else #pragma omp parallel for for(int j = 0; j < n; ++j){ #endif //__APPLE__ _Q[j*n + j] = 0; for(int i = j+1; i < n; ++i){ const double euclidean_dist_sq( utils::euclideanDistanceSquared<scalar_type>( _embedding_container->begin()+j*_init_params._embedding_dimensionality, _embedding_container->begin()+(j+1)*_init_params._embedding_dimensionality, _embedding_container->begin()+i*_init_params._embedding_dimensionality, _embedding_container->begin()+(i+1)*_init_params._embedding_dimensionality ) ); const double v = 1./(1.+euclidean_dist_sq); _Q[j*n + i] = static_cast<scalar_type>(v); _Q[i*n + j] = static_cast<scalar_type>(v); } } #ifdef __APPLE__ ); #endif double sum_Q = 0; for(auto& v : _Q){ sum_Q += v; } _normalization_Q = 
static_cast<scalar_type>(sum_Q); } template <typename scalar_type> void TSNE<scalar_type>::computeGradient(double exaggeration){ const int n = size(); const int dim = _init_params._embedding_dimensionality; //#pragma omp parallel for for(int i = 0; i < n; ++i){ for(int d = 0; d < dim; ++d){ _gradient[i * dim + d] = 0; double sum_positive(0.); double sum_negative(0.); for(int j = 0; j < n; ++j){ const int idx = i*n + j; const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]); const double positive(_P[idx] * _Q[idx] * distance); const double negative(_Q[idx] * _Q[idx] / _normalization_Q * distance); sum_positive += positive; sum_negative += negative; } _gradient[i * dim + d] = static_cast<scalar_type>(4 * (exaggeration*sum_positive - sum_negative)); } } } //temp template <typename T> T sign(T x) { return (x == .0 ? .0 : (x < .0 ? -1.0 : 1.0)); } template <typename scalar_type> void TSNE<scalar_type>::updateTheEmbedding(double mult){ for(int i = 0; i < _gradient.size(); ++i){ _gain[i] = static_cast<scalar_type>((sign(_gradient[i]) != sign(_previous_gradient[i])) ? 
(_gain[i] + .2) : (_gain[i] * .8)); if(_gain[i] < _init_params._minimum_gain){ _gain[i] = static_cast<scalar_type>(_init_params._minimum_gain); } _gradient[i] = static_cast<scalar_type>((_gradient[i]>0?1:-1)*std::abs(_gradient[i]*_init_params._eta* _gain[i])/(_init_params._eta*_gain[i])); _previous_gradient[i] = static_cast<scalar_type>(((_iteration<_init_params._mom_switching_iter)?_init_params._momentum:_init_params._final_momentum) * _previous_gradient[i] - _init_params._eta * _gain[i] * _gradient[i]); (*_embedding_container)[i] += _previous_gradient[i] * mult; } ++_iteration; } template <typename scalar_type> double TSNE<scalar_type>::computeKullbackLeiblerDivergence(){ double kl = 0; const int n = size(); for(int j = 0; j < n; ++j){ for(int i = 0; i < n; ++i){ if(i == j) continue; kl += _P[j*n + i] * std::log(_P[j*n + i] / (_Q[j*n + i]/_normalization_Q)); } } return kl; } } } #endif
GeometryConverter.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
#pragma once

#include <unordered_set>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/BuildingModel.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcGloballyUniqueId.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h>
#include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include "IncludeCarveHeaders.h"
#include "GeometryInputData.h"
#include "RepresentationConverter.h"
#include "CSG_Adapter.h"

// Converts a loaded IFC building model into Carve geometry (one ProductShapeData
// per IfcProduct, keyed by GlobalId) and afterwards resolves the spatial
// structure (project -> site -> building -> storey -> element) into a
// parent/child tree of ProductShapeData.  All status/error messages from the
// model and the converters are funneled into this object (see messageTarget).
class GeometryConverter : public StatusCallback
{
protected:
	shared_ptr<BuildingModel> m_ifc_model;                             // the IFC model being converted
	shared_ptr<GeometrySettings> m_geom_settings;                      // tessellation settings (e.g. vertices per circle)
	shared_ptr<RepresentationConverter> m_representation_converter;    // converts IfcRepresentation items into meshes
	std::map<std::string, shared_ptr<ProductShapeData> > m_product_shape_data;          // GlobalId (UTF-8) -> generated shape data
	std::map<std::string, shared_ptr<BuildingObject> > m_map_outside_spatial_structure; // objects not reachable from the IfcProject tree
	double m_recent_progress = 0;                                      // last progress value reported, to throttle callbacks
	double m_csg_eps = 1.5e-05;                                        // epsilon handed to carve for CSG operations
	std::map<int, std::vector<shared_ptr<StatusCallback::Message> > > m_messages;       // entity id -> messages already seen (for de-duplication)
#ifdef ENABLE_OPENMP
	Mutex m_writelock_messages;                                        // guards m_messages when converting in parallel
#endif

public:
	// getters and setters
	shared_ptr<BuildingModel>& getBuildingModel() { return m_ifc_model; }
	shared_ptr<RepresentationConverter>& getRepresentationConverter() { return m_representation_converter; }
	shared_ptr<GeometrySettings>& getGeomSettings() { return m_geom_settings; }
	std::map<std::string, shared_ptr<ProductShapeData> >& getShapeInputData() { return m_product_shape_data; }
	std::map<std::string, shared_ptr<BuildingObject> >& getObjectsOutsideSpatialStructure() { return m_map_outside_spatial_structure; }

	// Takes shared ownership of the model and registers itself as message sink
	// for both the model and the representation converter.
	GeometryConverter( shared_ptr<BuildingModel>& ifc_model )
	{
		m_ifc_model = ifc_model;
		m_geom_settings = shared_ptr<GeometrySettings>( new GeometrySettings() );
		resetNumVerticesPerCircle();
		shared_ptr<UnitConverter>& unit_converter = m_ifc_model->getUnitConverter();
		m_representation_converter = shared_ptr<RepresentationConverter>( new RepresentationConverter( m_geom_settings, unit_converter ) );

		// redirect all messages to this->messageTarget
		m_ifc_model->setMessageTarget( this );
		m_representation_converter->setMessageTarget( this );
	}
	virtual ~GeometryConverter() {}

	// Clears all caches and unloads the IFC model itself, reporting progress.
	void resetModel()
	{
		progressTextCallback( L"Unloading model, cleaning up memory..." );
		clearInputCache();
		m_recent_progress = 0.0;

		m_ifc_model->clearCache();
		m_ifc_model->clearIfcModel();
		progressTextCallback( L"Unloading model done" );
		progressValueCallback( 0.0, "parse" );

#ifdef _DEBUG
		GeomDebugDump::clearMeshsetDump();
#endif
	}

	// Drops all conversion results and cached intermediate data; the model itself stays loaded.
	void clearInputCache()
	{
		m_product_shape_data.clear();
		m_map_outside_spatial_structure.clear();
		m_representation_converter->clearCache();
		m_messages.clear();
	}

	void resetNumVerticesPerCircle()
	{
		m_geom_settings->resetNumVerticesPerCircle();
	}

	// Sets the epsilon used by carve for CSG; applied in convertGeometry().
	void setCsgEps(double eps)
	{
		m_csg_eps = eps;
	}

	// Replaces the current model: detaches message routing from the old model,
	// clears caches, and wires unit conversion + message routing to the new one.
	void setModel( shared_ptr<BuildingModel> model )
	{
		if( m_ifc_model )
		{
			m_ifc_model->unsetMessageCallBack();
		}
		clearInputCache();
		m_ifc_model = model;
		m_representation_converter->clearCache();
		m_representation_converter->setUnitConverter( m_ifc_model->getUnitConverter() );
		m_ifc_model->setMessageTarget( this );
	}

	// Recursively attaches child shapes to product_data, following
	// IfcRelAggregates (decomposition) and, for spatial structure elements,
	// IfcRelContainedInSpatialStructure (containment).  Marks every visited
	// product as added to the spatial structure.
	void resolveProjectStructure( shared_ptr<ProductShapeData>& product_data )
	{
		if( !product_data )
		{
			return;
		}
		if( product_data->m_ifc_object_definition.expired() )
		{
			return;
		}
		shared_ptr<IfcObjectDefinition> ifc_object_def(product_data->m_ifc_object_definition);
		shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
		if (!ifc_product)
		{
			return;
		}

		product_data->m_added_to_spatial_structure = true;

		// follow decomposition relationships (IfcRelAggregates)
		const std::vector<weak_ptr<IfcRelAggregates> >& vec_IsDecomposedBy = ifc_product->m_IsDecomposedBy_inverse;
		for( size_t ii = 0; ii < vec_IsDecomposedBy.size(); ++ii )
		{
			const weak_ptr<IfcRelAggregates>& rel_aggregates_weak_ptr = vec_IsDecomposedBy[ii];
			if( rel_aggregates_weak_ptr.expired() )
			{
				continue;
			}
			shared_ptr<IfcRelAggregates> rel_aggregates( rel_aggregates_weak_ptr );
			if( rel_aggregates )
			{
				const std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = rel_aggregates->m_RelatedObjects;
				for( size_t jj = 0; jj < vec_related_objects.size(); ++jj )
				{
					const shared_ptr<IfcObjectDefinition>& related_obj_def = vec_related_objects[jj];
					if( related_obj_def )
					{
						// shapes are keyed by UTF-8 encoded GlobalId
						std::string related_guid;
						if (related_obj_def->m_GlobalId)
						{
							std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
							related_guid = converterX.to_bytes(related_obj_def->m_GlobalId->m_value);
						}
						auto it_product_map = m_product_shape_data.find(related_guid);
						if( it_product_map != m_product_shape_data.end() )
						{
							shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
							if( related_product_shape )
							{
								product_data->addChildProduct( related_product_shape, product_data );
								resolveProjectStructure( related_product_shape );
							}
						}
					}
				}
			}
		}

		// for spatial structure elements, also follow containment relationships
		shared_ptr<IfcSpatialStructureElement> spatial_ele = dynamic_pointer_cast<IfcSpatialStructureElement>(ifc_product);
		if( spatial_ele )
		{
			const std::vector<weak_ptr<IfcRelContainedInSpatialStructure> >& vec_contains = spatial_ele->m_ContainsElements_inverse;
			for( size_t ii = 0; ii < vec_contains.size(); ++ii )
			{
				const weak_ptr<IfcRelContainedInSpatialStructure>& rel_contained_weak_ptr = vec_contains[ii];
				if( rel_contained_weak_ptr.expired() )
				{
					continue;
				}
				shared_ptr<IfcRelContainedInSpatialStructure> rel_contained( rel_contained_weak_ptr );
				if( rel_contained )
				{
					const std::vector<shared_ptr<IfcProduct> >& vec_related_elements = rel_contained->m_RelatedElements;
					for( size_t jj = 0; jj < vec_related_elements.size(); ++jj )
					{
						const shared_ptr<IfcProduct>& related_product = vec_related_elements[jj];
						if( related_product )
						{
							std::string related_guid;
							if (related_product->m_GlobalId)
							{
								std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
								related_guid = converterX.to_bytes(related_product->m_GlobalId->m_value);
							}
							auto it_product_map = m_product_shape_data.find(related_guid);
							if( it_product_map != m_product_shape_data.end() )
							{
								shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
								if( related_product_shape )
								{
									product_data->addChildProduct( related_product_shape, product_data );
									resolveProjectStructure( related_product_shape );
								}
							}
						}
					}
				}
			}
		}

		// TODO: handle IfcRelAssignsToProduct
	}

	// Scans a property set for appearance-related properties and, when a
	// complex property with usage name "Color" is found, attaches a
	// corresponding AppearanceData to product_shape.
	void readAppearanceFromPropertySet( const shared_ptr<IfcPropertySet>& prop_set, shared_ptr<ProductShapeData>& product_shape )
	{
		if( !prop_set )
		{
			return;
		}
		for( auto& ifc_property : prop_set->m_HasProperties )
		{
			if( !ifc_property )
			{
				continue;
			}

			shared_ptr<IfcSimpleProperty> simple_property = dynamic_pointer_cast<IfcSimpleProperty>(ifc_property);
			if( simple_property )
			{
				// ENTITY IfcSimpleProperty ABSTRACT SUPERTYPE OF(ONEOF( IfcPropertyBoundedValue, IfcPropertyEnumeratedValue, IfcPropertyListValue,
				// IfcPropertyReferenceValue, IfcPropertySingleValue, IfcPropertyTableValue))
				shared_ptr<IfcIdentifier> property_name = simple_property->m_Name;
				// NOTE(review): property_name is dereferenced without a null check here;
				// if m_Name can be unset this would crash -- TODO confirm against the schema.
				std::wstring name_str = property_name->m_value;
				if( name_str.compare( L"LayerName" ) == 0 )
				{
					// TODO: implement layers
				}
				shared_ptr<IfcText> description = simple_property->m_Description;
				shared_ptr<IfcPropertySingleValue> property_single_value = dynamic_pointer_cast<IfcPropertySingleValue>(simple_property);
				if( property_single_value )
				{
					//shared_ptr<IfcValue>& nominal_value = property_single_value->m_NominalValue;  //optional
					//shared_ptr<IfcUnit>& unit = property_single_value->m_Unit;  //optional
				}
				continue;
			}

			shared_ptr<IfcComplexProperty> complex_property = dynamic_pointer_cast<IfcComplexProperty>(ifc_property);
			if( complex_property )
			{
				if( !complex_property->m_UsageName ) continue;
				if( complex_property->m_UsageName->m_value.compare( L"Color" ) == 0 )
				{
					// the same color is used for ambient, diffuse and specular
					vec4 vec_color;
					m_representation_converter->getStylesConverter()->convertIfcComplexPropertyColor( complex_property, vec_color );
					shared_ptr<AppearanceData> appearance_data( new AppearanceData( -1 ) );
					if( !appearance_data )
					{
						throw OutOfMemoryException( __FUNC__ );
					}
					appearance_data->m_apply_to_geometry_type = AppearanceData::GEOM_TYPE_ANY;
					appearance_data->m_color_ambient.setColor( vec_color );
					appearance_data->m_color_diffuse.setColor( vec_color );
					appearance_data->m_color_specular.setColor( vec_color );
					appearance_data->m_shininess = 35.f;
					product_shape->addAppearance( appearance_data );
				}
			}
		}
	}

	/*\brief method convertGeometry: Creates geometry for Carve from previously loaded BuildingModel model.
	  Converts every IfcObjectDefinition independently (in parallel when OpenMP is
	  enabled), then subtracts openings in related objects, then resolves the
	  spatial structure starting at the IfcProject; objects not reachable from the
	  project tree are collected in m_map_outside_spatial_structure.
	**/
	void convertGeometry()
	{
		progressTextCallback( L"Creating geometry..." );
		progressValueCallback( 0, "geometry" );
		m_product_shape_data.clear();
		m_map_outside_spatial_structure.clear();
		m_representation_converter->clearCache();

		if( !m_ifc_model )
		{
			return;
		}

		shared_ptr<ProductShapeData> ifc_project_data;
		std::vector<shared_ptr<IfcObjectDefinition> > vec_object_definitions;

		// NOTE(review): length_to_meter_factor is computed but not used below -- verify intent
		double length_to_meter_factor = 1.0;
		if( m_ifc_model->getUnitConverter() )
		{
			length_to_meter_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
		}
		carve::setEpsilon( m_csg_eps );

		// collect all object definitions from the model
		const std::map<int, shared_ptr<BuildingEntity> >& map_entities = m_ifc_model->getMapIfcEntities();
		for( auto it = map_entities.begin(); it != map_entities.end(); ++it )
		{
			shared_ptr<BuildingEntity> obj = it->second;
			shared_ptr<IfcObjectDefinition> product = dynamic_pointer_cast<IfcObjectDefinition>(obj);
			if(product)
			{
				vec_object_definitions.push_back(product);
			}
		}

		// create geometry for each IfcProduct independently, spatial structure will be resolved later
		std::map<std::string, shared_ptr<ProductShapeData> >* map_products_ptr = &m_product_shape_data;
		const int num_object_definitions = (int)vec_object_definitions.size();

#ifdef ENABLE_OPENMP
		Mutex writelock_map;
		Mutex writelock_ifc_project;

#pragma omp parallel firstprivate(num_object_definitions) shared(map_products_ptr)
		{
			// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,40)
#endif
			for( int i = 0; i < num_object_definitions; ++i )
			{
				shared_ptr<IfcObjectDefinition> ifc_product = vec_object_definitions[i];
				const int entity_id = ifc_product->m_entity_id;
				std::string guid;
				if (ifc_product->m_GlobalId)
				{
					std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
					guid = converterX.to_bytes(ifc_product->m_GlobalId->m_value);
				}

				shared_ptr<ProductShapeData> product_geom_input_data( new ProductShapeData( entity_id ) );
				product_geom_input_data->m_ifc_object_definition = ifc_product;

				std::stringstream thread_err;
				if( !m_geom_settings->getRenderObjectFilter()(ifc_product) )
				{
					// geometry will be created in method subtractOpenings
					continue;
				}
				else if( dynamic_pointer_cast<IfcProject>(ifc_product) )
				{
					// remember the project root for the later spatial-structure resolution
#ifdef ENABLE_OPENMP
					ScopedLock scoped_lock( writelock_ifc_project );
#endif
					ifc_project_data = product_geom_input_data;
				}

				// convert and collect errors per product, so one failing product
				// does not abort the whole conversion (OutOfMemory still does)
				try
				{
					convertIfcProductShape( product_geom_input_data );
				}
				catch( OutOfMemoryException& e )
				{
					throw e;
				}
				catch( BuildingException& e )
				{
					thread_err << e.what();
				}
				catch( carve::exception& e )
				{
					thread_err << e.str();
				}
				catch( std::exception& e )
				{
					thread_err << e.what();
				}
				catch( ... )
				{
					thread_err << "undefined error, product id " << entity_id;
				}

				{
#ifdef ENABLE_OPENMP
					ScopedLock scoped_lock( writelock_map );
#endif
					map_products_ptr->insert( std::make_pair( guid, product_geom_input_data ) );

					if( thread_err.tellp() > 0 )
					{
						messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
					}
				}

				// progress callback, throttled to steps of > 2%
				double progress = (double)i / (double)num_object_definitions;
				if( progress - m_recent_progress > 0.02 )
				{
#ifdef ENABLE_OPENMP
					if( omp_get_thread_num() == 0 )
#endif
					{
						// leave 10% of progress to openscenegraph internals
						progressValueCallback( progress*0.9, "geometry" );
						m_recent_progress = progress;
					}
				}
			}
#ifdef ENABLE_OPENMP
		} // implicit barrier
#endif

		// subtract openings in related objects, such as IFCBUILDINGELEMENTPART connected to a window through IFCRELAGGREGATES
		for( auto it = map_products_ptr->begin(); it != map_products_ptr->end(); ++it )
		{
			shared_ptr<ProductShapeData> product_geom_input_data = it->second;
			try
			{
				subtractOpeningsInRelatedObjects(product_geom_input_data);
			}
			catch( OutOfMemoryException& e )
			{
				throw e;
			}
			catch( BuildingException& e )
			{
				messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
			}
			catch( carve::exception& e )
			{
				messageCallback(e.str(), StatusCallback::MESSAGE_TYPE_ERROR, "");
			}
			catch( std::exception& e )
			{
				messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
			}
			catch( ... )
			{
				messageCallback("undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__);
			}
		}

		try
		{
			// now resolve spatial structure
			if( ifc_project_data )
			{
				resolveProjectStructure( ifc_project_data );
			}

			// check if there are entities that are not in spatial structure
			for( auto it_product_shapes = m_product_shape_data.begin(); it_product_shapes != m_product_shape_data.end(); ++it_product_shapes )
			{
				shared_ptr<ProductShapeData> product_shape = it_product_shapes->second;
				if( !product_shape )
				{
					continue;
				}
				if( !product_shape->m_added_to_spatial_structure )
				{
					if( !product_shape->m_ifc_object_definition.expired() )
					{
						shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition );
						shared_ptr<IfcFeatureElementSubtraction> opening = dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def);
						if( !m_geom_settings->getRenderObjectFilter()(ifc_object_def) )
						{
							continue;
						}
						std::string guid;
						if (ifc_object_def->m_GlobalId)
						{
							std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
							guid = converterX.to_bytes(ifc_object_def->m_GlobalId->m_value);
						}
						m_map_outside_spatial_structure[guid] = ifc_object_def;
					}
				}
			}
		}
		catch( OutOfMemoryException& e )
		{
			throw e;
		}
		catch( BuildingException& e )
		{
			messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
		}
		catch( std::exception& e )
		{
			messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
		}
		catch( ... )
		{
			messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
		}

		m_representation_converter->getProfileCache()->clearProfileCache();
		progressTextCallback( L"Loading file done" );
		progressValueCallback( 1.0, "geometry" );
	}

	//\brief method convertIfcProduct: Creates geometry objects (meshset with connected vertex-edge-face graph) from an IfcProduct object
	// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
	void convertIfcProductShape( shared_ptr<ProductShapeData>& product_shape )
	{
		if( product_shape->m_ifc_object_definition.expired() )
		{
			return;
		}
		shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
		shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
		if (!ifc_product)
		{
			return;
		}
		if( !ifc_product->m_Representation )
		{
			return;
		}

		// NOTE(review): length_factor is computed but not used below -- verify intent
		double length_factor = 1.0;
		if( m_ifc_model )
		{
			if( m_ifc_model->getUnitConverter() )
			{
				length_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
			}
		}

		// evaluate IFC geometry
		shared_ptr<IfcProductRepresentation>& product_representation = ifc_product->m_Representation;
		std::vector<shared_ptr<IfcRepresentation> >& vec_representations = product_representation->m_Representations;
		for( size_t i_representations = 0; i_representations < vec_representations.size(); ++i_representations )
		{
			const shared_ptr<IfcRepresentation>& representation = vec_representations[i_representations];
			if( !representation )
			{
				continue;
			}
			try
			{
				shared_ptr<RepresentationData> representation_data( new RepresentationData() );
				m_representation_converter->convertIfcRepresentation( representation, representation_data );
				product_shape->m_vec_representations.push_back( representation_data );
				representation_data->m_parent_product = product_shape;
			}
			catch( OutOfMemoryException& e )
			{
				throw e;
			}
			catch( BuildingException& e )
			{
				messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
			}
			catch( std::exception& e )
			{
				messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
			}
		}

		// IfcProduct has an ObjectPlacement that can be local or global
		product_shape->m_object_placement = ifc_product->m_ObjectPlacement;
		if( ifc_product->m_ObjectPlacement )
		{
			// IfcPlacement2Matrix follows related placements in case of local coordinate systems
			std::unordered_set<IfcObjectPlacement*> placement_already_applied;
			m_representation_converter->getPlacementConverter()->convertIfcObjectPlacement( ifc_product->m_ObjectPlacement, product_shape, placement_already_applied, false );
		}

		// handle openings
		std::vector<shared_ptr<ProductShapeData> > vec_opening_data;
		const shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
		if( ifc_element )
		{
			m_representation_converter->subtractOpenings(ifc_element, product_shape);
		}

		// Fetch the IFCProduct relationships: read appearance from attached property sets
		if( ifc_product->m_IsDefinedBy_inverse.size() > 0 )
		{
			std::vector<weak_ptr<IfcRelDefinesByProperties> >& vec_IsDefinedBy_inverse = ifc_product->m_IsDefinedBy_inverse;
			for( size_t i = 0; i < vec_IsDefinedBy_inverse.size(); ++i )
			{
				shared_ptr<IfcRelDefinesByProperties> rel_def( vec_IsDefinedBy_inverse[i] );
				shared_ptr<IfcPropertySetDefinitionSelect> relating_property_definition_select = rel_def->m_RelatingPropertyDefinition;
				if( relating_property_definition_select )
				{
					// TYPE IfcPropertySetDefinitionSelect = SELECT (IfcPropertySetDefinition ,IfcPropertySetDefinitionSet);
					shared_ptr<IfcPropertySetDefinition> property_set_def = dynamic_pointer_cast<IfcPropertySetDefinition>(relating_property_definition_select);
					if( property_set_def )
					{
						shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def);
						if( property_set )
						{
							readAppearanceFromPropertySet( property_set, product_shape );
						}
						continue;
					}

					shared_ptr<IfcPropertySetDefinitionSet> property_set_def_set = dynamic_pointer_cast<IfcPropertySetDefinitionSet>(relating_property_definition_select);
					if( property_set_def_set )
					{
						std::vector<shared_ptr<IfcPropertySetDefinition> >& vec_propterty_set_def = property_set_def_set->m_vec;
						std::vector<shared_ptr<IfcPropertySetDefinition> >::iterator it_property_set_def;
						for( it_property_set_def = vec_propterty_set_def.begin(); it_property_set_def != vec_propterty_set_def.end(); ++it_property_set_def )
						{
							shared_ptr<IfcPropertySetDefinition> property_set_def2 = (*it_property_set_def);
							if( property_set_def2 )
							{
								shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def2);
								if( property_set )
								{
									readAppearanceFromPropertySet( property_set, product_shape );
								}
							}
						}
						continue;
					}
				}
			}
		}
	}

	// If the element behind product_shape has openings, subtracts those openings
	// from the shapes of all objects aggregated into the element (e.g. an
	// IfcBuildingElementPart connected to a window through IfcRelAggregates).
	void subtractOpeningsInRelatedObjects(shared_ptr<ProductShapeData>& product_shape)
	{
		if( product_shape->m_ifc_object_definition.expired() )
		{
			return;
		}
		shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
		shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
		if (!ifc_product)
		{
			return;
		}
		shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
		if( !ifc_element )
		{
			return;
		}
		if( ifc_element->m_HasOpenings_inverse.size() == 0 )
		{
			return;
		}

		// collect aggregated objects
		const std::vector<weak_ptr<IfcRelAggregates> >& vec_decomposed_by = ifc_element->m_IsDecomposedBy_inverse;
		for( auto& decomposed_by : vec_decomposed_by )
		{
			if( decomposed_by.expired() )
			{
				continue;
			}
			shared_ptr<IfcRelAggregates> decomposed_by_aggregates(decomposed_by);
			std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = decomposed_by_aggregates->m_RelatedObjects;
			for( auto& related_object : vec_related_objects )
			{
				if( !related_object )
				{
					continue;
				}
				// NOTE(review): objects without a GlobalId are skipped entirely here,
				// since the lookup below only happens inside this check -- confirm intended
				std::string guid;
				if (related_object->m_GlobalId)
				{
					std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
					guid = converterX.to_bytes(related_object->m_GlobalId->m_value);
					auto it_find_related_shape = m_product_shape_data.find(guid);
					if( it_find_related_shape != m_product_shape_data.end() )
					{
						shared_ptr<ProductShapeData>& related_product_shape = it_find_related_shape->second;
						m_representation_converter->subtractOpenings(ifc_element, related_product_shape);
					}
				}
			}
		}
	}

	// Message sink for the model and the converters (ptr is the GeometryConverter
	// itself).  De-duplicates messages per entity: an identical message text for
	// the same entity id is reported only once, then forwards to messageCallback.
	virtual void messageTarget( void* ptr, shared_ptr<StatusCallback::Message> m )
	{
		GeometryConverter* myself = (GeometryConverter*)ptr;
		if( myself )
		{
			if( m->m_entity )
			{
#ifdef ENABLE_OPENMP
				ScopedLock lock( myself->m_writelock_messages );
#endif
				// make sure that the same message for one entity does not appear several times
				const int entity_id = m->m_entity->m_entity_id;
				auto it = myself->m_messages.find( entity_id );
				if( it != myself->m_messages.end() )
				{
					std::vector<shared_ptr<StatusCallback::Message> >& vec_message_for_entity = it->second;
					for( size_t i = 0; i < vec_message_for_entity.size(); ++i )
					{
						shared_ptr<StatusCallback::Message>& existing_message = vec_message_for_entity[i];
						if( existing_message->m_message_text.compare( m->m_message_text ) == 0 )
						{
							// same message for same entity is already there, so ignore message
							return;
						}
					}
					vec_message_for_entity.push_back( m );
				}
				else
				{
					std::vector<shared_ptr<StatusCallback::Message> >& vec = myself->m_messages.insert( std::make_pair( entity_id, std::vector<shared_ptr<StatusCallback::Message> >() ) ).first->second;
					vec.push_back( m );
				}
			}
			myself->messageCallback( m );
		}
	}
};
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 2048 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/

/*
  EdgeInfo: one monotone edge of a polygon in sorted rendering form; points
  run in increasing y, `direction` records the original orientation and
  `ghostline` marks edges that should not be stroked.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;

  double
    scanline;

  PointInfo
    *points;

  size_t
    number_points;

  ssize_t
    direction;

  MagickBooleanType
    ghostline;

  size_t
    highwater;
} EdgeInfo;

/* ElementInfo: center, radii and rotation of an ellipse-like primitive. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  MVGInfo: working state while parsing MVG primitives; `primitive_info` and
  `extent` point at the caller's growable primitive buffer, `offset` is the
  next free slot.
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/* PolygonInfo: a polygon as a sorted array of monotone edges. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/* PathInfoCode: classification of each point in a flattened path. */
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/* PathInfo: one point of a flattened path together with its code. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);

static ssize_t
  TracePath(MVGInfo *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /* AcquireCriticalMemory aborts on allocation failure, so no NULL check */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);  /* no source: return defaults */
  exception=AcquireExceptionInfo();
  /* deep-copy owned strings */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /* deep-copy pattern images */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* dash pattern is a 0.0-terminated array; x counts its entries */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): message tag reused from the dash-pattern branch above;
           looks like copy-paste -- confirm intended */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  /* deep-copy mask images */
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge,const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } register const PointInfo *p, *q; /* Edge sorting for right-handed coordinate system. */ p=((const EdgeInfo *) p_edge)->points; q=((const EdgeInfo *) q_edge)->points; DrawCompareEdge(p[0].y,q[0].y); DrawCompareEdge(p[0].x,q[0].x); DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)* (q[1].x-q[0].x)); DrawCompareEdge(p[1].y,q[1].y); DrawCompareEdge(p[1].x,q[1].x); return(0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { register EdgeInfo *p; register ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) memset(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) memset(&point,0,sizeof(point)); (void) memset(&bounds,0,sizeof(bounds)); polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=0.0; polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) direction; polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->number_edges=0; for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || 
(path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. 
*/ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; 
polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; ghostline=MagickFalse; edge++; } } polygon_info->number_edges=edge; qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),DrawCompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return(polygon_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P r i m i t i v e T o P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector % path structure. % % The format of the ConvertPrimitiveToPath method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o Method ConvertPrimitiveToPath returns a vector path structure of type % PathInfo. % % o draw_info: a structure of type DrawInfo. % % o primitive_info: Specifies a pointer to an PrimitiveInfo structure. % % */ static void LogPathInfo(const PathInfo *path_info) { register const PathInfo *p; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path"); for (p=path_info; p->code != EndCode; p++) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? 
"lineto" : "?"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path"); } static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info) { MagickBooleanType closed_subpath; PathInfo *path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return((PathInfo *) NULL); default: break; } for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return((PathInfo *) NULL); coordinates=0; closed_subpath=MagickFalse; n=0; p.x=(-1.0); p.y=(-1.0); q.x=(-1.0); q.y=(-1.0); start=0; for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code=LineToCode; if (coordinates <= 0) { /* New subpath. */ coordinates=(ssize_t) primitive_info[i].coordinates; p=primitive_info[i].point; start=n; code=MoveToCode; closed_subpath=primitive_info[i].closed_subpath; } coordinates--; if ((code == MoveToCode) || (coordinates <= 0) || (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) || (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon)) { /* Eliminate duplicate points. */ path_info[n].code=code; path_info[n].point=primitive_info[i].point; q=primitive_info[i].point; n++; } if (coordinates > 0) continue; /* next point in current subpath */ if (closed_subpath != MagickFalse) { closed_subpath=MagickFalse; continue; } /* Mark the p point as open if the subpath is not closed. 
*/ path_info[start].code=OpenCode; path_info[n].code=GhostlineCode; path_info[n].point=primitive_info[i].point; n++; path_info[n].code=LineToCode; path_info[n].point=p; n++; } path_info[n].code=EndCode; path_info[n].point.x=0.0; path_info[n].point.y=0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1), sizeof(*path_info)); return(path_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo structure. % % The format of the DestroyDrawInfo method is: % % DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each parameter follows: % % o draw_info: the draw info. % */ MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) { assert(draw_info != (DrawInfo *) NULL); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info->signature == MagickCoreSignature); if (draw_info->id != (char *) NULL) draw_info->id=DestroyString(draw_info->id); if (draw_info->primitive != (char *) NULL) draw_info->primitive=DestroyString(draw_info->primitive); if (draw_info->text != (char *) NULL) draw_info->text=DestroyString(draw_info->text); if (draw_info->geometry != (char *) NULL) draw_info->geometry=DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); if (draw_info->metrics != (char *) NULL) draw_info->metrics=DestroyString(draw_info->metrics); if (draw_info->family != (char *) NULL) 
draw_info->family=DestroyString(draw_info->family); if (draw_info->encoding != (char *) NULL) draw_info->encoding=DestroyString(draw_info->encoding); if (draw_info->density != (char *) NULL) draw_info->density=DestroyString(draw_info->density); if (draw_info->server_name != (char *) NULL) draw_info->server_name=(char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) draw_info->dash_pattern=(double *) RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *) NULL) draw_info->clip_mask=DestroyString(draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask); if (draw_info->composite_mask != (Image *) NULL) draw_info->composite_mask=DestroyImage(draw_info->composite_mask); draw_info->signature=(~MagickCoreSignature); draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y E d g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyEdge() destroys the specified polygon edge. % % The format of the DestroyEdge method is: % % ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % % o edge: the polygon edge number to destroy. 
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  register EdgeInfo
    *doomed;

  /*
    Free the edge's point buffer, then close the gap in the edge array so
    the remaining edges stay contiguous.  Returns the new edge count.
  */
  assert(edge < polygon_info->number_edges);
  doomed=polygon_info->edges+edge;
  doomed->points=(PointInfo *) RelinquishMagickMemory(doomed->points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) memmove(doomed,doomed+1,(size_t)
      (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P o l y g o n I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    n;

  /*
    Release every edge's point buffer, the edge array, and finally the
    PolygonInfo itself; always returns NULL.
  */
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      for (n=(ssize_t) polygon_info->number_edges-1; n >= 0; n--)
        polygon_info->edges[n].points=(PointInfo *)
          RelinquishMagickMemory(polygon_info->edges[n].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clip the horizontal span `edge' for destination scanline y so that the
  points mapped through `affine' stay inside `image' (the source extent).
  Returns a span with x2 < x1 when the scanline misses the source entirely.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* negative x-scale: the two intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* degenerate sx: whole scanline maps outside the source columns. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* NOTE(review): this degenerate branch assigns edge->x2 while the
             columns branch above assigns edge->x1 — asymmetry looks
             suspicious; confirm intent before changing. */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  Return the inverse of the 2x3 affine matrix (scale/rotate part inverted via
  the reciprocal determinant; translation back-substituted).
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  /* PerceptibleReciprocal() guards against a singular (zero-determinant)
     matrix by clamping the reciprocal. */
  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

/*
  Composite `source' over `image' under the forward transform `affine':
  project the source corners to find the destination bounding box, then for
  each destination scanline inverse-map every pixel back into the source and
  interpolate.  Parallelized per-scanline when OpenMP is available.
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  /* forward-map the four source corners into destination space. */
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* scanline does not intersect the transformed source */
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    /* NOTE(review): x_offset is incremented but never read — candidate for
       removal; left untouched here. */
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5);
         x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* inverse-map destination (x,y) into source coordinates. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w B o u n d i n g R e c t a n g l e s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clamp the requested stroke width to a value proportional to the image
  diagonal-ish extent, so absurd stroke widths cannot blow up rendering.
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  return(MagickMin((double) draw_info->stroke_width,
    (2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}

/*
  Debug aid: stroke each edge's bounding rectangle (red for "down" edges,
  green for "up" edges) and the overall polygon bounds (blue) onto `image'.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* default to 96 DPI unless the draw info carries a density string. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* half the effective stroke width: how far to inflate each rectangle. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounds, inflated by mid and clamped to the image. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ?
        (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ?
        (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ?
        (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ?
        (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red stroke for down edges, green for up edges. */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        /* NOTE(review): the `status&=' result is overwritten by the plain
           `status=' from DrawPrimitive() below, so a TraceRectangle()
           failure is effectively ignored — verify whether that is intended. */
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* bailed out of the loop early: report accumulated status. */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* finally stroke the overall bounds in blue. */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType DrawClipPath(Image *image, const DrawInfo *draw_info,const char *id,ExceptionInfo *exception) { const char *clip_path; Image *clipping_mask; MagickBooleanType status; clip_path=GetImageArtifact(image,id); if (clip_path == (const char *) NULL) return(MagickFalse); clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path, exception); if (clipping_mask == (Image *) NULL) return(MagickFalse); status=SetImageMask(image,WritePixelMask,clipping_mask,exception); clipping_mask=DestroyImage(clipping_mask); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p p i n g M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClippingMask() draws the clip path and returns it as an image clipping % mask. % % The format of the DrawClippingMask method is: % % Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *clip_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the clip path id. % % o clip_path: the clip path. % % o exception: return any errors or warnings in this structure. % */ static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, const char *id,const char *clip_path,ExceptionInfo *exception) { DrawInfo *clone_info; Image *clip_mask, *separate_mask; MagickStatusType status; /* Draw a clip path. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); clip_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(clip_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(clip_mask)); status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception); status=QueryColorCompliance("#0000",AllCompliance, &clip_mask->background_color,exception); clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha; clip_mask->background_color.alpha_trait=BlendPixelTrait; status=SetImageBackgroundColor(clip_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,clip_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); if (clone_info->clip_mask != (char *) NULL) clone_info->clip_mask=DestroyString(clone_info->clip_mask); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; clone_info->clip_path=MagickTrue; status=RenderMVGContent(clip_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(clip_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { clip_mask=DestroyImage(clip_mask); clip_mask=separate_mask; status=NegateImage(clip_mask,MagickFalse,exception); if (status == MagickFalse) clip_mask=DestroyImage(clip_mask); } if (status == MagickFalse) clip_mask=DestroyImage(clip_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(clip_mask); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C o m p o s i t e M a s k                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Render `mask_path' (an MVG string) into a grayscale composite mask the
  size of `image'.  Returns NULL on failure; the caller owns the result.

  Fix: same double-destroy defect as DrawClippingMask — on NegateImage()
  failure the mask was destroyed both inside the separate_mask branch and by
  the shared failure path (DestroyImage(NULL) asserts).  The mask is now
  destroyed exactly once, by the shared path.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* render the mask path: white fill, transparent stroke, zero width. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* reduce the rendered image to its alpha channel and invert it. */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
    }
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w D a s h P o l y g o n                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  /* MagickStatusType so each stroke result can be accumulated with &=. */
  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count vertices up to the UndefinedPrimitive terminator.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /*
    Scratch polygon for the individual dash segments; 2*n+32 elements.
    NOTE(review): presumably sized for the worst case of one start point
    plus one end point per vertex with slack -- confirm against callers.
  */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  /*
    Dash pattern lengths are expressed in user units; scale them by the
    affine expansion so they are in device pixels.
  */
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset by walking pattern entries (the pattern array
    is terminated by a value <= 0.0).  On exit, n indexes the pattern
    entry in effect and length holds its remaining extent.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each edge of the flattened polygon and carve it into dash
    segments, stroking each completed segment as its own polygon.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    /* Sanity bound on edge length; bail out on absurd geometry. */
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* Current pattern entry exhausted: advance (wrap at terminator). */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
      (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            Odd pattern index: record the start point of the next drawn
            dash at the interpolated position along this edge.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /*
            Even pattern index: close the current dash and stroke it.
          */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unconsumed remainder of this edge into the next edge. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      /*
        Flush the final partial dash; nudge by MagickEpsilon so the end
        point is distinct from the previous vertex.
      */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  /* Collapse the accumulated status bitmask back to a boolean. */
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   G r a d i e n t   I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Map a pixel coordinate to a raw gradient offset.  For linear gradients
  this is the scalar projection of the pixel vector onto the gradient
  vector (the caller normalizes by the vector length); for radial
  gradients it is the (elliptically scaled) distance from the center.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      /* gamma = 1/(|p|*|q|), guarded against zero-length vectors. */
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      /* (p.q)/(|p||q|)*|q| == projection of q onto the unit p direction. */
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* Repeat spread uses the plain Euclidean distance from center. */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /*
        Rotate into the gradient's frame and scale by the ellipse radii so
        the offset is normalized distance from the center.
      */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  qsort comparator ordering gradient stops by ascending offset.
  NOTE(review): the > test precedes the epsilon-equality test, so a pair
  whose offsets differ by less than MagickEpsilon compares asymmetrically
  (1 one way, 0 the other) -- confirm this is acceptable for qsort here.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Sort stops by offset so the per-pixel linear search below is valid.
    NOTE(review): this writes through a pointer derived from the const
    draw_info -- pre-existing behavior, flagged for awareness.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  /*
    Row-parallel loop; each iteration owns one scanline of the cache view.
    Note bounding_box.height/width are used as exclusive end coordinates.
  */
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed the offset for x == x1 (the per-pixel branch below skips it). */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Pad: clamp offsets outside [0,1] to the first/last stop. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          /* Find first stop beyond this offset. */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Interpolate between the bracketing stops i-1 and i. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Reflect: fold the offset back and forth across [0,1]. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          antialias=MagickFalse;
          repeat=0.0;
          /* Repeat: tile the pattern, antialiasing the seam pixel. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      Seam pixel: blend the last stop into the first.
                      NOTE(review): alpha here is a sub-pixel distance, not
                      a [0,1] fraction -- pre-existing behavior.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /* Composite the gradient color over the existing pixel. */
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Ensure the shared primitive-info buffer can hold mvg_info->offset+pad
  more elements (plus PrimitiveExtentPad slack).  Returns MagickTrue when
  capacity is available (growing the buffer if needed).  On allocation
  failure or an over-limit request it throws a ResourceLimitError, replaces
  the buffer with a minimal zeroed allocation so the caller can unwind
  safely, sets the extent to 1, and returns MagickFalse.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  /* Compare in double to sidestep size_t overflow in extent*quantum. */
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /* Mark the newly grown tail as unused. */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}

/*
  Splay-tree key comparator for MVG macro names: plain strcmp ordering.
*/
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  const char
    *p,
    *q;

  p=(const char *) target;
  q=(const char *) source;
  return(strcmp(p,q));
}

/*
  Scan an MVG stream for named push/pop sections (e.g.
  'push graphic-context "wheel" ... pop graphic-context') and return a
  splay tree mapping each name to its enclosed MVG text.  Returns NULL when
  primitive is NULL.  Ownership: the tree and its keys/values are owned by
  the caller and released via the tree's relinquish handlers.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* n tracks push/pop nesting; the macro ends when it returns to 0. */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}

/*
  Return MagickTrue when the string parses as a number.  MagickFalse only
  when StringToDouble consumed no characters (p == point) and returned a
  near-zero value -- i.e. the token is not numeric.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=StringToDouble(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ?
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); 
} if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) 
== 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if 
(graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); 
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) 
ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* StringToDouble(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; 
bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token 
== next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); 
if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { 
status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) 
align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) 
>= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case 
EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. 
*/ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /*
        Release any text payloads attached to text/image primitives before
        freeing the primitive array itself.
      */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  if (stops != (StopInfo *) NULL)
    stops=(StopInfo *) RelinquishMagickMemory(stops);
  /*
    Pop any graphic contexts still on the stack (n can be > 0 after an
    unbalanced push/pop or an early error exit).
  */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
      keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Public entry point: render the MVG primitives in draw_info onto image.
    Thin wrapper that starts RenderMVGContent() at recursion depth 0.
  */
  return(RenderMVGContent(image,draw_info,0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: on return, the rendered pattern image; any previous image is
%      destroyed.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern's MVG text and geometry were stashed as image artifacts
    ("<name>" and "<name>-geometry") by the MVG "push pattern"/"push gradient"
    handlers; return quietly if either is missing.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  /*
    Allocate a canvas of the recorded geometry; background is set to
    #00000000 (transparent) before the pattern is rendered onto it.
  */
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  /*
    An optional "<name>-type" artifact selects the gradient type.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void)
LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) 
return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo *q; register EdgeInfo *p; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. */ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta <= 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta >= alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=PerceptibleReciprocal(alpha); beta=delta.x*(y-q->y)-delta.y*(x-q->x); distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. 
*/ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) (p->number_points-1); i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 
1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) { status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); if (status == MagickFalse) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(status); } } RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. 
*/ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. 
*/ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.25 ? 
1.0 : 0.0; } GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception); CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception); CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q, (double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static inline double ConstrainCoordinate(double x) { if (x < (double) -(SSIZE_MAX-512)) return((double) -(SSIZE_MAX-512)); if (x > (double) (SSIZE_MAX-512)) return((double) (SSIZE_MAX-512)); return(x); } static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" 
}; PointInfo p, point, q; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," 
end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status&=SetImageColorspace(image,sRGBColorspace,exception); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask, exception); status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask, exception); } x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5)); y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5)); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register 
Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; 
target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); break; } case ResetMethod: { PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); composite_images=(Image *) NULL; if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, exception); else if (*primitive_info->text != '\0') { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_images=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=MagickFalse; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. 
*/ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; status&=TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) status&=SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) status&=DrawAffineImage(image,composite_image,&affine,exception); else status&=CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double) GetPixelAlpha(image,q),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); 
(void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status&=DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception); status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. 
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/

/*
  Render a round cap for one end of an open stroke: a tiny (2*MagickEpsilon)
  4-point quad centered on the given endpoint, drawn with the current fill
  settings via DrawPolygonPrimitive().
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  /* Sentinel terminates the primitive list for DrawPolygonPrimitive(). */
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.  The stroke outline is traced as a new polygon
    which is then FILLED with the stroke color/pattern; the clone therefore
    swaps stroke into fill and disables its own stroke.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* Walk the primitive list one subpath at a time. */
  for (p=primitive_info; p->primitive != UndefinedPrimitive;
       p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    /* Open subpaths with round caps get explicit caps at both endpoints. */
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* Identity: sx=sy=1, all shear/translation terms zeroed by memset. */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info..
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Defaults: opaque black fill, fully transparent white stroke. */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* Inherit font/density/antialias settings from the image info. */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* Image options (e.g. -define/-set) override the built-in defaults. */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* "bold"/"normal" keywords first; otherwise a numeric weight. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns
the permuation of the (n,k). % % The format of the Permutate method is: % % void Permutate(ssize_t n,ssize_t k) % % A description of each parameter follows: % % o n: % % o k: % % */ static inline double Permutate(const ssize_t n,const ssize_t k) { double r; register ssize_t i; r=1.0; for (i=k+1; i <= n; i++) r*=i; for (i=1; i <= (n-k); i++) r/=i; return(r); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a c e P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TracePrimitive is a collection of methods for generating graphic % primitives such as arcs, ellipses, paths, etc. % */ static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo degrees) { PointInfo center, radius; center.x=0.5*(end.x+start.x); center.y=0.5*(end.y+start.y); radius.x=fabs(center.x-start.x); radius.y=fabs(center.y-start.y); return(TraceEllipse(mvg_info,center,radius,degrees)); } static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo arc,const double angle, const MagickBooleanType large_arc,const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; MagickStatusType status; PointInfo center, points[3], radii; register double cosine, sine; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; size_t arc_segments; ssize_t offset; offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) return(TracePoint(primitive_info,end)); radii.x=fabs(arc.x); radii.y=fabs(arc.y); if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon)) return(TraceLine(primitive_info,start,end)); cosine=cos(DegreesToRadians(fmod((double) angle,360.0))); sine=sin(DegreesToRadians(fmod((double) angle,360.0))); 
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2); center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2); delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/ (radii.y*radii.y); if (delta < MagickEpsilon) return(TraceLine(primitive_info,start,end)); if (delta > 1.0) { radii.x*=sqrt((double) delta); radii.y*=sqrt((double) delta); } points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x); points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y); points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x); points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y); alpha=points[1].x-points[0].x; beta=points[1].y-points[0].y; if (fabs(alpha*alpha+beta*beta) < MagickEpsilon) return(TraceLine(primitive_info,start,end)); factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25; if (factor <= 0.0) factor=0.0; else { factor=sqrt((double) factor); if (sweep == large_arc) factor=(-factor); } center.x=(double) ((points[0].x+points[1].x)/2-factor*beta); center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha); alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+ MagickEpsilon)))); status=MagickTrue; p=primitive_info; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) 
(center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); if (status == 0) break; p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } if (status == 0) return(MagickFalse); mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, 
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.  `quantum' scales the number of interpolated
    points by the largest coordinate spread between any pair of control
    points, so long curves get proportionally more segments.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      /* Reject spreads that would overflow the size computation below. */
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /* Cap the per-coordinate sampling density at BezierQuantum. */
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      /* Release whichever allocation succeeded before reporting failure. */
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent may reallocate; refresh the base pointer. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
*/ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. */ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { if (TracePoint(p,points[i]) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } p+=p->coordinates; } if (TracePoint(p,end) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; return(TraceEllipse(mvg_info,start,offset,degrees)); } static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center, const PointInfo radii,const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo 
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* A zero radius degenerates to nothing; report success with no points. */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /* Pick an angular step inversely proportional to the larger radius so
     bigger ellipses get more segments. */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  /* Normalize the end angle so the sweep is non-negative. */
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /* CheckPrimitiveExtent may reallocate; refresh the base pointer. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* Emit the exact end-angle point so the arc terminates precisely. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* A full sweep ends where it began; mark the subpath closed then. */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* Back-fill the primitive type over every generated point. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

static MagickBooleanType
TraceLine(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { if (TracePoint(primitive_info,start) == MagickFalse) return(MagickFalse); if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return(MagickTrue); } if (TracePoint(primitive_info+1,end) == MagickFalse) return(MagickFalse); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; primitive_info->closed_subpath=MagickFalse; return(MagickTrue); } static ssize_t TracePath(MVGInfo *mvg_info,const char *path, ExceptionInfo *exception) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; MagickBooleanType status; PointInfo end = {0.0, 0.0}, points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register PrimitiveInfo *q; register ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; status=MagickTrue; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; q=primitive_info; for (p=path; *p != '\0'; ) { if (status == MagickFalse) break; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = MagickFalse; PointInfo arc = {0.0, 0.0}; /* Elliptical arc. 
*/ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? 
y : point.y+y); if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. */ do { points[0]=point; for (i=1; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'H' ? 
x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. 
*/ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? 
y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. 
*/ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. 
*/ point=start; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); primitive_info->closed_subpath=MagickTrue; number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token,exception); break; } } } if (status == MagickFalse) return(-1); primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return((ssize_t) number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=start.x; point.y=end.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=end.x; point.y=start.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info, const PointInfo start,const PointInfo 
end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* A degenerate rectangle produces no points but is not an error. */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* Corner radii cannot exceed half the rectangle's extent. */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* Trace the four rounded corners as quarter-ellipse arcs, clockwise
     from the top-right corner. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  /* Repeat the first point to close the subpath. */
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* Rewind to the subpath start and describe it as one closed primitive. */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo 
*) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = {0.0, 0.0}, dy = {0.0, 0.0}, inverse_slope = {0.0, 0.0}, slope = {0.0, 0.0}, theta = {0.0, 0.0}; /* Allocate paths. */ number_vertices=primitive_info->coordinates; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return((PrimitiveInfo *) NULL); (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); closed_path=primitive_info[0].closed_subpath; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. 
*/ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. */ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } CheckPathExtent(MaxStrokePad,MaxStrokePad); dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { stroke_q[q++]=box_q[1]; 
stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ 
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
GB_binop__bor_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bor_int32 // A.*B function (eWiseMult): GB_AemultB__bor_int32 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bor_int32 // C+=b function (dense accum): GB_Cdense_accumb__bor_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_int32 // C=scalar+B GB_bind1st__bor_int32 // C=scalar+B' GB_bind1st_tran__bor_int32 // C=A+scalar GB_bind2nd__bor_int32 // C=A'+scalar GB_bind2nd_tran__bor_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] 
// cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x) | (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT32 || GxB_NO_BOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bor_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bor_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bor_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bor_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bor_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t bij = Bx [p] ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bor_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] 
; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB_bind1st_tran__bor_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB_bind2nd_tran__bor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
precond.h
/*
 * Build a Jacobi (diagonal) preconditioner for an N x N matrix stored in
 * CSR format (row_ptr / col_ind / val).
 *
 * On return, M[i] holds 1/A(i,i) for every row whose diagonal entry is
 * stored.  Rows with no stored diagonal fall back to 1.0 (identity),
 * so M is always fully initialized — the previous version left such
 * entries as uninitialized garbage.
 *
 * Note: loop indices are plain int (matching N and the CSR offsets),
 * which avoids signed/unsigned comparison issues and keeps the loop in
 * OpenMP's canonical form on older (pre-3.0) implementations.
 */
void create_precond_jacobi(int N, const int* row_ptr, const int* col_ind,
                           const double* val, double* M)
{
    #pragma omp parallel for
    for (int i = 0; i < N; i++) {
        /* Default so rows missing a diagonal entry are well-defined. */
        M[i] = 1.0;
        for (int j = row_ptr[i]; j < row_ptr[i + 1]; j++) {
            if (i == col_ind[j]) {
                M[i] = 1.0 / val[j];
                /* CSR rows have unique column indices, so the first
                   match is the diagonal; stop scanning this row. */
                break;
            }
        }
    }
}

/*
 * Apply the Jacobi preconditioner: z = M^{-1} r, computed elementwise as
 * z[i] = r[i] * M[i] since M stores the diagonal reciprocals.
 */
void apply_precond_jacobi(int N, const double* M, const double* r, double* z)
{
    #pragma omp parallel for
    for (int i = 0; i < N; i++) {
        z[i] = r[i] * M[i];
    }
}
pam.c
#include <stdio.h> #include <stdlib.h> #include "../../dataStruct/data.h" #define INF 999999 extern dataIF data; double PamAssign(int L, int kLSH ){ //printf("*****************************************************\n"); double prev = 99999999999; int i=0; int j=0; int k = GetNoOfCluster(); double objFuncJ=0; //for(i=0;i<GetDataSize() - k;i++){ //Loop on points, NOT clusters i = 0; while( i < GetDataSize() - k){ double dist=INF; double dist2 = INF; int kVal=0; int kVal2=0; value IthData; GetIthData(i, &IthData); // #pragma omp parallel // { for(j=0;j<k;j++){ value v; GetIthCentroid(j, &v); double tempDist=data.distance(&IthData, &v); if (tempDist < dist2) { if(tempDist < dist){ kVal2 = kVal; dist2 = dist; kVal=j; dist=tempDist; } else { kVal2 = j; dist2 = tempDist; } } } //AssignClustExtra(i,kVal2,dist2,kVal,dist); AssignClustExtra(i,kVal,dist,kVal2,dist2); //ADD TO CLUSTER KVAL // #pragma omp critical // { if( !AddToClust(i,kVal) ){ objFuncJ+=dist; i++; //printf("asdfsadflkkj\n"); } // } // } } return objFuncJ; }
GB_unaryop__lnot_int16_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int16_uint32 // op(A') function: GB_tran__lnot_int16_uint32 // C type: int16_t // A type: uint32_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int16_uint32 ( int16_t *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; 
p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int16_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 4; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(32*t2-Nz,4)),4*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(16*t1+Ny+29,4)),floord(32*t2+Ny+28,4)),floord(32*t1-32*t2+Nz+Ny+27,4));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(32*t2-Nz-252,256)),ceild(4*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t3+Nx,256),floord(Nt+Nx-4,256)),floord(16*t1+Nx+29,256)),floord(32*t2+Nx+28,256)),floord(32*t1-32*t2+Nz+Nx+27,256));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),4*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),4*t3+2),256*t4+254),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(256*t4,t5+1); ubv=min(256*t4+255,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } 
} } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
ReLU.h
// --------------------------------------------------------------------------
//  Binary Brain  -- binary neural net framework
//
//                               Copyright (C) 2018-2019 by Ryuji Fuchikami
//                               https://github.com/ryuz
//                               ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------

#pragma once

#include "bb/Manager.h"
#include "bb/Binarize.h"

namespace bb {

// ReLU (activation layer).
// Forward emits max(x, 0); Backward passes dy through where x > 0 and zero
// elsewhere.  When BinType is Bit, or when "binary" mode is commanded, the
// layer delegates entirely to its Binarize base class.
template <typename BinType = float, typename RealType = float>
class ReLU : public Binarize<BinType, RealType>
{
    using _super = Binarize<BinType, RealType>;

public:
    static inline std::string ModelName(void) { return "ReLU"; }
    static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); }

    std::string GetModelName(void)  const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }

protected:
    bool m_binary_mode;     // when true, act as Binarize instead of ReLU

    using _super::m_host_only;

protected:
    // Constructor.  Binary mode is forced on when the output type is Bit.
    ReLU()
    {
        m_binary_mode = (DataType<BinType>::type == BB_TYPE_BIT);
    }

    /**
     * @brief  command processing
     * @detail handles runtime configuration commands
     * @param  args command tokens ("binary <bool>" or "host_only <bool>")
     */
    void CommandProc(std::vector<std::string> args) override
    {
        // binary-mode setting (ignored when BinType is already Bit,
        // because that case is unconditionally binary)
        if ( args.size() == 2 && args[0] == "binary" ) {
            if ( DataType<BinType>::type != BB_TYPE_BIT ) {
                m_binary_mode = EvalBool(args[1]);
            }
        }

        // HostOnly-mode setting (forces the CPU paths below)
        if (args.size() == 2 && args[0] == "host_only") {
            m_host_only = EvalBool(args[1]);
        }
    }

    // Append this layer's settings to the model-information dump.
    void PrintInfoText(std::ostream& os, std::string indent, int columns, int nest, int depth) const override
    {
        if ( m_binary_mode ) {
            os << indent << " binary : " << m_binary_mode << std::endl;
        }
        _super::PrintInfoText(os, indent, columns, nest, depth);
    }

public:
    static std::shared_ptr<ReLU> Create(void)
    {
        auto self = std::shared_ptr<ReLU>(new ReLU);
        return self;
    }

    ~ReLU() {}

    // Forward computation for a single node only (reference/debug path).
    std::vector<double> ForwardNode(index_t node, std::vector<double> x_vec) const override
    {
        if ( m_binary_mode ) {
            return _super::ForwardNode(node, x_vec);
        }

        std::vector<double> y_vec;
        for ( auto x : x_vec ) {
            y_vec.push_back((x > 0.0) ? x : 0.0);   // ReLU
        }
        return y_vec;
    }

    /**
     * @brief  forward
     * @detail performs the forward pass
     * @param  x_buf  input data
     * @param  train  set true while training (keeps x for Backward)
     * @return forward result
     */
    inline FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
    {
        // binary mode: delegate to Binarize
        if ( DataType<BinType>::type == BB_TYPE_BIT || m_binary_mode) {
            return _super::Forward(x_buf, train);
        }

        BB_ASSERT(x_buf.GetType() == DataType<RealType>::type);

        // allocate the output buffer
        FrameBuffer y_buf(x_buf.GetFrameSize(), x_buf.GetShape(), DataType<BinType>::type);

        // save the input for the backward pass
        if ( train ) {
            this->PushFrameBuffer(x_buf);
            // this->PushFrameBuffer(y_buf);
        }

#ifdef BB_WITH_CUDA
        if ( !m_host_only && DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32
                && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            // CUDA version
            auto ptr_x = x_buf.LockDeviceMemoryConst();
            auto ptr_y = y_buf.LockDeviceMemory(true);
            bbcu_fp32_ReLU_Forward(
                        (float const *)ptr_x.GetAddr(),
                        (float       *)ptr_y.GetAddr(),
                        (int          )x_buf.GetNodeSize(),
                        (int          )x_buf.GetFrameSize(),
                        (int          )(x_buf.GetFrameStride() / sizeof(float))
                    );
            return y_buf;
        }
#endif

        if ( DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 ) {
            // AVX version
            index_t frame_size = x_buf.GetFrameSize();
            index_t node_size  = x_buf.GetNodeSize();

            auto x_ptr = x_buf.template LockConst<float>();
            auto y_ptr = y_buf.template Lock<float>(true);

            // frame count rounded up to a multiple of 8 floats (one __m256);
            // assumes the frame stride provides that padding — the aligned
            // load/store below would fault otherwise
            index_t m256_frame_size = (int)(((frame_size + 7) / 8) * 8);

            __m256 zero = _mm256_set1_ps(0);
            for (index_t node = 0; node < node_size; ++node) {
                auto x_addr = (float const *)x_ptr.GetAddr(node);
                auto y_addr = (float *)y_ptr.GetAddr(node);
                for (index_t frame = 0; frame < m256_frame_size; frame += 8) {
                    __m256 in_sig = _mm256_load_ps(&x_addr[frame]);
                    in_sig = _mm256_max_ps(in_sig, zero);       // max(x, 0)
                    _mm256_store_ps(&y_addr[frame], in_sig);
                }
            }
            return y_buf;
        }

        {
            // generic version
            index_t frame_size = x_buf.GetFrameSize();
            index_t node_size  = x_buf.GetNodeSize();

            auto x_ptr = x_buf.template LockConst<RealType>();
            auto y_ptr = y_buf.template Lock<BinType>();

            // ReLU
            #pragma omp parallel for
            for (index_t node = 0; node < node_size; ++node) {
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    auto x = x_ptr.Get(frame, node);
                    y_ptr.Set(frame, node, x > (RealType)0.0 ? (BinType)x : (BinType)0.0);
                }
            }

            return y_buf;
        }
    }

    /**
     * @brief  backward
     * @detail performs the backward pass; pops the x saved by Forward
     * @return backward result (dy gated by x > 0)
     */
    inline FrameBuffer Backward(FrameBuffer dy_buf) override
    {
        if (dy_buf.Empty()) {
            return FrameBuffer();
        }

        // binary mode: delegate to Binarize
        if ( DataType<BinType>::type == BB_TYPE_BIT || m_binary_mode) {
            return _super::Backward(dy_buf);
        }

        BB_ASSERT(dy_buf.GetType() == DataType<RealType>::type);

        // allocate the output buffer
        FrameBuffer dx_buf(dy_buf.GetFrameSize(), dy_buf.GetShape(), dy_buf.GetType());

        // FrameBuffer y_buf = this->PopFrameBuffer();
        FrameBuffer x_buf = this->PopFrameBuffer();

#ifdef BB_WITH_CUDA
        if ( DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
                && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            // GPU version
            auto ptr_x  = x_buf.LockDeviceMemoryConst();
            auto ptr_dy = dy_buf.LockDeviceMemoryConst();
            auto ptr_dx = dx_buf.LockDeviceMemory(true);
            bbcu_fp32_ReLU_Backward(
                        (float const *)ptr_x.GetAddr(),
                        (float const *)ptr_dy.GetAddr(),
                        (float       *)ptr_dx.GetAddr(),
                        (int          )dy_buf.GetNodeSize(),
                        (int          )dy_buf.GetFrameSize(),
                        (int          )(dy_buf.GetFrameStride() / sizeof(float))
                    );
            return dx_buf;
        }
#endif

        if ( DataType<RealType>::type == BB_TYPE_FP32 ) {
            // AVX version
            index_t frame_size = dx_buf.GetFrameSize();
            index_t node_size  = dx_buf.GetNodeSize();

            auto x_ptr  = x_buf.template LockConst<float>();
            // auto y_ptr  = y_buf.template LockConst<float>();
            auto dy_ptr = dy_buf.template LockConst<float>();
            auto dx_ptr = dx_buf.template Lock<float>(true);

            // frame count rounded up to one __m256 (8 floats)
            index_t m256_frame_size = (int)(((frame_size + 7) / 8) * 8);

            __m256 zero = _mm256_set1_ps(0);
            for (index_t node = 0; node < node_size; ++node) {
                auto x_addr  = (float *)x_ptr.GetAddr(node);
                auto dy_addr = (float *)dy_ptr.GetAddr(node);
                auto dx_addr = (float *)dx_ptr.GetAddr(node);
                for (index_t frame = 0; frame < m256_frame_size; frame += 8) {
                    __m256 x    = _mm256_load_ps(&x_addr[frame]);
                    __m256 dy   = _mm256_load_ps(&dy_addr[frame]);
                    // mask is all-ones where x > 0, so AND implements
                    // dx = (x > 0) ? dy : 0
                    __m256 mask = _mm256_cmp_ps(x, zero, _CMP_GT_OS);
                    __m256 dx   = _mm256_and_ps(dy, mask);
                    _mm256_store_ps(&dx_addr[frame], dx);
                }
            }
            return dx_buf;
        }

        {
            // generic version
            index_t frame_size = dx_buf.GetFrameSize();
            index_t node_size  = dx_buf.GetNodeSize();

            auto x_ptr  = x_buf.template LockConst<BinType>();
            auto dy_ptr = dy_buf.template LockConst<RealType>();
            auto dx_ptr = dx_buf.template Lock<RealType>();

            // ReLU
            #pragma omp parallel for
            for (index_t node = 0; node < node_size; ++node) {
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    auto x  = x_ptr.Get(frame, node);
                    auto dy = dy_ptr.Get(frame, node);
                    dx_ptr.Set(frame, node, (x > (BinType)0) ? dy : (RealType)0);
                }
            }

            return dx_buf;
        }
    }

    // serialization
protected:
    void DumpObjectData(std::ostream &os) const override
    {
        // version
        std::int64_t ver = 1;
        bb::SaveValue(os, ver);

        // parent class
        _super::DumpObjectData(os);

        // members
        bb::SaveValue(os, m_binary_mode);
    }

    void LoadObjectData(std::istream &is) override
    {
        // version
        std::int64_t ver;
        bb::LoadValue(is, ver);

        BB_ASSERT(ver == 1);

        // parent class
        _super::LoadObjectData(is);

        // members
        bb::LoadValue(is, m_binary_mode);
    }
};

}


// end of file
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ''fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ''classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
% % */ #include "MagickCore/studio.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { double center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { double tau; ssize_t left, right; double mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { double tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static double OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const double,double *), ZeroCrossHistogram(double *,const double,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const double cluster_threshold, % const double weighting_exponent, % const MagickBooleanType verbose,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. % % o exception: return any errors or warnings in this structure. 
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const double cluster_threshold,
  const double weighting_exponent,const MagickBooleanType verbose,
  ExceptionInfo *exception)
{
#define SegmentImageTag  "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  double
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register double
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one candidate cluster per (red, green, blue) triple of
    peak intervals discovered by the histogram analysis.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel belongs to the first cluster
    whose RGB extents (widened by SafeMargin) contain it.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel; centers accumulate sums here and are
              normalized to means below.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): the threshold compares against the running cluster count
    rather than a pixel total -- matches upstream behavior; verify before
    changing.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated sums to mean colors.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: 513-entry table of i*i, biased by 255 so
    it can be indexed directly by channel differences in [-255, 255].
  */
  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(double) i*(double) i;
  /*
    Allocate image colormap: one entry per surviving cluster, at the
    cluster's mean color.
  */
  if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse grain classes; unclassified pixels fall through to the fuzzy
    c-means assignment below.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *clust;

    register const PixelInfo
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,0,q);
      for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
      {
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
             (clust->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
             (clust->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
             (clust->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
             (clust->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
             (clust->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
             (clust->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(image,(Quantum) clust->id,q);
            break;
          }
      }
      if (clust == (Cluster *) NULL)
        {
          double
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: assign to the colormap entry with the
            largest membership value 1/sum.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(
              GetPixelRed(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->blue))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(
                GetPixelRed(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->blue))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(image,(Quantum) j,q);
              }
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image,exception);
  /*
    Relinquish resources (un-bias squares before freeing it).
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;
  free_squares=squares;
  free_squares=(double *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e   C r o s s i n g s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings, working from the coarsest scale down.
    NOTE(review): entries at index i+1 are read, so the array is assumed to
    hold number_crossings+1 elements -- confirm against the caller.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing to the chosen position (or drop it if none).
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e R e g i o n                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represent the extends
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.  Scanning resumes from extents->index, so
    repeated calls enumerate successive regions.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e r i v a t i v e H i s t o g r a m                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const double *histogram,
%        double *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of doubles representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of doubles is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation
    (one-sided three-point stencils at 0 and 255).
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cluster_threshold:  This double represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the
%      second derivative of the histogram.  As the value is increased, you
%      can expect a smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* NOTE(review): extrema is allocated with sizeof(**histogram)
       (ssize_t-sized elements) although it holds shorts; harmless
       over-allocation but verify intent. */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Unwind the allocations made so far before failing.
        */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one candidate cluster per (red, green, blue) triple of
    peak intervals.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel; centers accumulate sums here and are
              normalized to means below.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated sums to mean colors.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster as the object and the largest as the
    background.  NOTE(review): both scans start at head->next and never
    compare the final list node (loop stops when cluster->next is NULL) --
    matches upstream behavior; verify before changing.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The threshold is the midpoint between the background and object
        cluster centers, per channel.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  I n i t i a l i z e   H i s t o g r a m                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeHistogram() computes the histogram for an image.
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram)
%
%  A description of each parameter follows.
%
%    o image:  Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
% */ static void InitializeHistogram(const Image *image,ssize_t **histogram, ExceptionInfo *exception) { register const Quantum *p; register ssize_t i, x; ssize_t y; /* Initialize histogram. */ for (i=0; i <= 255; i++) { histogram[Red][i]=0; histogram[Green][i]=0; histogram[Blue][i]=0; } for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++; histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++; histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++; p+=GetPixelChannels(image); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e I n t e r v a l T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeIntervalTree() initializes an interval tree from the lists of % zero crossings. % % The format of the InitializeIntervalTree method is: % % InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes, % IntervalTree *node) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. 
%
*/

/*
  Append to `list' every node of the tree that currently has no children
  (i.e. the leaves), in node/sibling/child recursion order.
  NOTE(review): writes are not bounds-checked against the TreeLength
  capacity of `list' -- confirm callers cannot overflow it.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/*
  For every node, set mean_stability to the average stability of its direct
  children (0.0 for leaves), recursing over siblings and children.
*/
static void MeanStability(IntervalTree *node)
{
  register IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      register ssize_t
        count;

      register double
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      /* count >= 1 here: the child list is non-empty. */
      node->mean_stability=sum/(double) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/*
  For every node, set stability to the tau gap between the node and its
  first child (0.0 for leaves), recursing over siblings and children.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

/*
  Build the interval tree from the scale-space zero-crossing lists: starting
  from a root covering bins [0,255], each pass splits every current leaf at
  the crossings of the next (finer) scale.  Returns NULL on allocation
  failure (the partial tree is freed).
*/
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.  AcquireCriticalMemory() aborts on
    failure, so root needs no NULL check.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so the first pass splits the root with zero_crossing[0]. */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: each crossing inside a leaf's interval closes one child
      interval and opens the next.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /* First child hangs off head->child; later ones chain siblings. */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /* Close the trailing interval if at least one split occurred. */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p t i m a l   T a u                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%      double OptimalTau(const ssize_t *histogram,const double max_tau,
%        const double min_tau,const double delta_tau,
%        const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
% % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static double OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; double average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. 
*/ derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(double *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(double) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(double *) RelinquishMagickMemory(derivative); second_derivative=(double *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. */ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. 
*/ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(double) number_nodes; /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const double tau, % double *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of doubles representing the number % of pixels for each intensity of a particular color component. 
% */ static void ScaleSpace(const ssize_t *histogram,const double tau, double *scale_histogram) { double alpha, beta, *gamma, sum; register ssize_t u, x; gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma)); if (gamma == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateGammaMap"); alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI)); beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau)); for (x=0; x <= 255; x++) gamma[x]=0.0; for (x=0; x <= 255; x++) { gamma[x]=exp((double) beta*x*x); if (gamma[x] < MagickEpsilon) break; } for (x=0; x <= 255; x++) { sum=0.0; for (u=0; u <= 255; u++) sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)]; scale_histogram[x]=alpha*sum; } gamma=(double *) RelinquishMagickMemory(gamma); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e g m e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SegmentImage() segment an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % C-means technique. % % The format of the SegmentImage method is: % % MagickBooleanType SegmentImage(Image *image, % const ColorspaceType colorspace,const MagickBooleanType verbose, % const double cluster_threshold,const double smooth_threshold, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o colorspace: Indicate the colorspace. % % o verbose: Set to MagickTrue to print detailed information about the % identified classes. % % o cluster_threshold: This represents the minimum number of pixels % contained in a hexahedra before it can be considered valid (expressed % as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the buffers allocated so far before failing. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        /* ThrowBinaryException expands to a return statement; no semicolon
           follows by convention. */
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.  OptimalTau return values are discarded: only the
    extrema side effect is needed here.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique, then restore the original
    colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Z e r o C r o s s H i s t o g r a m                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(double *second_derivative,
%        const double smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of doubles representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: `parity' tracks the sign of the last nonzero value,
    so a crossing is flagged at the first sample after the sign flips.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Typedef declarations. 
*/ typedef enum { BitwiseAndAssignmentOperator = 0xd9U, BitwiseOrAssignmentOperator, LeftShiftAssignmentOperator, RightShiftAssignmentOperator, PowerAssignmentOperator, ModuloAssignmentOperator, PlusAssignmentOperator, SubtractAssignmentOperator, MultiplyAssignmentOperator, DivideAssignmentOperator, IncrementAssignmentOperator, DecrementAssignmentOperator, LeftShiftOperator, RightShiftOperator, LessThanEqualOperator, GreaterThanEqualOperator, EqualOperator, NotEqualOperator, LogicalAndOperator, LogicalOrOperator, ExponentialNotation } FxOperator; struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  const Image
    *next;

  FxInfo
    *fx_info;

  ssize_t
    i;

  unsigned char
    fx_op[2];

  /* AcquireCriticalMemory() aborts on failure, so no NULL check is needed. */
  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /* One virtual cache view per image in the list. */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  /*
    Convert compound to simple operators: each multi-character operator is
    replaced in the expression text by the single high-byte token of the
    matching FxOperator value.  Order matters -- longer operators must be
    rewritten before their substrings (e.g. "<<=" before "<<" and "<=").
  */
  fx_op[1]='\0';
  *fx_op=(unsigned char) BitwiseAndAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
  *fx_op=(unsigned char) BitwiseOrAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
  *fx_op=(unsigned char) PowerAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
  *fx_op=(unsigned char) ModuloAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
  *fx_op=(unsigned char) PlusAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
  *fx_op=(unsigned char) SubtractAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
  *fx_op=(unsigned char) MultiplyAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
  *fx_op=(unsigned char) DivideAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
  *fx_op=(unsigned char) IncrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
  *fx_op=(unsigned char) DecrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
  *fx_op=(unsigned char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
  *fx_op=(unsigned char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
  *fx_op=(unsigned char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
  *fx_op=(unsigned char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
  *fx_op=(unsigned char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
  *fx_op=(unsigned char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
  *fx_op=(unsigned char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
  /*
    Force right-to-left associativity for unary negation: every "-" becomes
    "-1.0*", then the rewrite is undone where "-" follows "^" or an
    exponent marker ("E"/"e") so scientific notation survives.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
% */ MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info) { ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateExpression method is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % const PixelChannel channel,const ssize_t x,const ssize_t y, % double *alpha,Exceptioninfo *exception) % double FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure. 
% */
/*
  GetFxSymbolValue() returns the cached value for a symbol, or NULL when the
  symbol has not been defined yet.  Ownership of the returned pointer stays
  with the symbol splay tree.
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol));
}

/*
  SetFxSymbolValue() updates an existing symbol in place, or allocates a new
  entry (key copied, value heap-allocated and owned by the tree).  Returns
  MagickFalse and records a ResourceLimitError on allocation failure.
*/
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  double
    *object;

  object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (object != (double *) NULL)
    {
      /* symbol already interned: overwrite the stored value */
      *object=value;
      return(MagickTrue);
    }
  object=(double *) AcquireMagickMemory(sizeof(*object));
  if (object == (double *) NULL)
    {
      (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        fx_info->images->filename);
      return(MagickFalse);
    }
  *object=value;
  return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object));
}

/*
  FxChannelStatistics() resolves statistics symbols such as "mean",
  "maxima.r", or "depth" for one image/channel, scaled by QuantumScale.
  Results are memoized in the symbol table under a key built from the image
  address, the channel number, and the symbol, so each statistic is computed
  at most once per (image,channel,symbol) triple.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent];

  const double
    *value;

  double
    statistic;

  const char
    *p;

  channel_mask=UndefinedChannel;
  /* scan for an explicit channel suffix, e.g. "mean.g" */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          /* narrow the image's active channel mask to the requested channel;
             the previous mask is saved and restored before returning */
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=GetFxSymbolValue(fx_info,key);
  if (value != (const double *) NULL)
    {
      /* cache hit: restore the channel mask and return the scaled value */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*(*value));
    }
  statistic=0.0;
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      statistic=(double) depth;
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=kurtosis;
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=maxima;
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=mean;
    }
  if (LocaleNCompare(symbol,"median",6) == 0)
    {
      double
        median;

      (void) GetImageMedian(image,&median,exception);
      statistic=median;
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=minima;
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=skewness;
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=standard_deviation;
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  /* memoize the raw statistic; scaling is applied on every return */
  if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
    return(0.0);
  return(QuantumScale*statistic);
}

static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
  const ssize_t,const char *,const size_t,double *,ExceptionInfo *);

/*
  IsFxFunction() returns MagickTrue when `expression' begins with `name'
  (length bytes) followed by a character that is either '(' or any
  non-whitespace character.  Expressions shorter than `length'+1 bytes never
  match (the leading scan rejects any embedded NUL).
*/
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    c;

  size_t
    i;

  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  c=expression[length];
  if ((LocaleNCompare(expression,name,length) == 0) &&
      ((isspace((int) ((unsigned char) c)) == 0) || (c == '(')))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  FxGCD() computes a floating-point greatest common divisor by the Euclidean
  algorithm (fmod-style remainder via floor).  Recursion stops when the
  smaller operand falls below the 0.001 tolerance or the recursion depth
  reaches FxMaxFunctionDepth, returning the larger operand.
*/
static inline double FxGCD(const double alpha,const double beta,
  const size_t depth)
{
#define FxMaxFunctionDepth  200

  if (alpha < beta)
    return(FxGCD(beta,alpha,depth+1));
  if ((fabs(beta) < 0.001) || (depth >= FxMaxFunctionDepth))
    return(alpha);
  return(FxGCD(beta,alpha-beta*floor(alpha/beta),depth+1));
}

/*
  FxSubexpression() advances past one parenthesized subexpression, returning
  a pointer to its closing parenthesis (see continuation below).
*/
static inline const char *FxSubexpression(const char
*expression,ExceptionInfo *exception)
{
  const char
    *subexpression;

  ssize_t
    level;

  level=0;
  subexpression=expression;
  /* walk forward tracking parenthesis nesting; stop at the ')' that brings
     the level back to the outermost open paren */
  while ((*subexpression != '\0') &&
         ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
  {
    if (strchr("(",(int) *subexpression) != (char *) NULL)
      level++;
    else
      if (strchr(")",(int) *subexpression) != (char *) NULL)
        level--;
    subexpression++;
  }
  if (*subexpression == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(subexpression);
}

/*
  FxGetSymbol() resolves a symbol reference at pixel (x,y): image selectors
  (s/u/v with optional [index]), pixel addressing (p{x,y} absolute or
  p[dx,dy] relative), color names, per-channel pixel values (r,g,b,a,...),
  image attributes (w,h,page.*,resolution.*,...), image statistics
  (mean, minima, ...), user-defined symbols, and image artifacts.
  Undefined symbols raise an OptionError, are defined as 0.0, and return 0.0.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MagickPathExtent];

  const char
    *artifact,
    *p;

  const double
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  PointInfo
    point;

  ssize_t
    i;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /* a selector prefix (s/u/v/p) is only recognized when the next character
     is not alphabetic (so e.g. "saturation" is not parsed as "s...") */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              /* 's' refers to the current image in the list */
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* u[expr]: evaluate the bracketed index expression */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              /* p{x,y}: absolute pixel coordinates */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              /* the comma operator makes alpha=x and beta=y */
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                /* p[dx,dy]: offsets relative to the current pixel */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,
                  subexpression,depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetPixelInfo(image,&pixel);
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /* try to interpret the remaining symbol as a color name (cached in the
     colors splay tree); the listed channel-derived names are excluded */
  if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
      (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      size_t
        length;

      (void) CopyMagickString(name,p,MagickPathExtent);
      length=strlen(name);
      /* strip a trailing ".channel" suffix unless a ')' intervenes */
      for (q=name+length-1; q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      q=name;
      if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
          (GetFxSymbolValue(fx_info,name) == (const double *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=length;
            }
          else
            {
              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,
                    ConstantString(name),ClonePixelInfo(&pixel));
                  p+=length;
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  (void) StripMagickString(symbol);
  if (*symbol == '\0')
    {
      /* bare pixel reference (e.g. "p[1,1]"): return the requested channel */
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);  /* fully opaque when the image has no alpha */
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case CompositePixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        case IndexPixelChannel:
          return(0.0);
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /* named symbols, dispatched on the first character */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (IsFxFunction(symbol,"channel",7) != MagickFalse)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /* channel(r,g,b,a) / channel(c,m,y,k,a): pick the geometry field
             that corresponds to the requested channel */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);  /* cyan is stored in the red slot */
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      /* "image.<stat>" forwards to FxChannelStatistics with the prefix
         stripped (symbol+6 skips "image.") */
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          /* Rec.709 luma coefficients */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"median",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);  /* magenta: stored in green slot */
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);  /* yellow: stored in blue slot */
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /* fall back to user-defined symbols, then image artifacts */
  value=GetFxSymbolValue(fx_info,symbol);
  if (value != (const double *) NULL)
    return(*value);
  artifact=GetImageArtifact(image,symbol);
  if (artifact != (const char *) NULL)
    return(StringToDouble(artifact,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UndefinedVariable","`%s'",symbol);
  /* define the symbol as 0.0 so the error is reported only once */
  (void) SetFxSymbolValue(fx_info,symbol,0.0);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans `expression' and returns a pointer to the
  operator where the expression should be split: the LOWEST-binding operator
  (highest FxPrecedence enum value) at parenthesis level zero.  Ties go to
  the rightmost occurrence for left-to-right-associative operators and to
  the leftmost for right-to-left ones (unary, ternary, assignment).  Returns
  NULL when no splittable operator is found.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  /* ordered weakest-binding last: a larger value means "split here first" */
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  const char
    *subexpression;

  int
    c;  /* previous character examined (-1 before the first) */

  size_t
    level;  /* {}/[] nesting depth */

  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) ||
        (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /* skip over function names and literals that embed operator characters
       (e.g. the '2' in atan2, scientific-notation exponents, hex colors) */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /* NOTE(review): no break above -- 'E'/'e' falls through to 'J'/'j'
         when not scientific notation; confirm intentional upstream. */
      case 'J':
      case 'j':
      {
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hex color literal: consume the hex digits */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /* implied multiplication, e.g. "2u" or ")(", but never before
             the x/y coordinates of a pixel reference */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) !=
                 (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) ==
               (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* only binary when the previous char can end an operand */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha((int) ((unsigned char) c)) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}

/*
  FxEvaluateSubexpression() recursively evaluates one FX subexpression at
  pixel (x,y) for the given channel; the secondary result (e.g. the right
  operand of a comma) is returned through *beta (continues below).
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  const char *expression,const size_t depth,double *beta,
  ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth  58
#define FxMaxSubexpressionDepth  200
#define FxReturn(value) \
{ \
  subexpression=DestroyString(subexpression); \
  return(value); \
}
#define FxParseConditional(subexpression,sentinal,p,q) \
{ \
  p=subexpression; \
  for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
    if (*q == '(') \
      { \
        for (q++; (*q != ')') && (*q != '\0'); q++); \
        if (*q == '\0') \
          break; \
      } \
  if (*q == '\0') \
    { \
      (void) ThrowMagickException(exception,GetMagickModule(), \
        OptionError,"UnableToParseExpression","`%s'",subexpression); \
      FxReturn(0.0); \
    } \
  if (strlen(q) == 1) \
    *(q+1)='\0'; \
  *q='\0'; \
}

  char
    *q,
    *subexpression;

  double
    alpha,
    gamma,
    sans,
    value;

  const char
    *p;

  *beta=0.0;
  sans=0.0;
  subexpression=AcquireString(expression);
  *subexpression='\0';
  if (depth > FxMaxSubexpressionDepth)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",expression);
      FxReturn(0.0);
    }
  /* stop evaluating once an error has been recorded */
  if (exception->severity >= ErrorException)
    FxReturn(0.0);
  while (isspace((int) ((unsigned char) *expression)) != 0)
    expression++;
  if (*expression == '\0')
    FxReturn(0.0);
  /* split at the weakest-binding operator and recurse on the left side */
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const
char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case BitwiseAndAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), 
OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), 
OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=pow(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=fmod(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), 
OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { 
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent-1); FxParseConditional(subexpression,':',p,q); if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(gamma); } case '=': { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); length=CopyMagickString(subexpression,expression+1,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); 
FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; size_t length; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch 
(channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="alpha"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedPixelChannel: type="gray"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } default: { switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } } *subexpression='\0'; length=1; if (strlen(expression) > 6) length=CopyMagickString(subexpression,expression+6, MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(alpha); } if (IsFxFunction(expression,"do",2) != MagickFalse) { size_t length; /* Parse do(expression,condition test). 
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha*PerceptibleReciprocal(*beta*(alpha-1.0)+1.0)); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"for",3) != MagickFalse) { size_t length; /* Parse for(initialization, condition test, expression). 
*/ length=CopyMagickString(subexpression,expression+4, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI)); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { double gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); if (IsNaN(alpha) != 0) FxReturn(alpha); gcd=FxGCD(alpha,*beta,0); FxReturn(gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression,"if",2) != MagickFalse) { size_t length; /* Parse if(condition test, true-expression, false-expression). 
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); if (fabs(alpha) >= MagickEpsilon) alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(alpha); } if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha))); } #endif break; } case 
'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta)); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { /* Round the fraction to nearest integer. 
*/ alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if ((alpha-floor(alpha)) < (ceil(alpha)-alpha)) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? -1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha)); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, 
depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { size_t length; /* Parse while(condition test, expression). */ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1, beta,exception); } FxReturn(alpha); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } default: break; } subexpression=DestroyString(subexpression); q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception); FxReturn(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo 
*fx_info, double *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Evaluate the expression for the gray channel at pixel (0,0); used when
    the result does not depend on a particular pixel location.
  */
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  return(status);
}

/*
  FxPreprocessExpression() evaluates the expression once up front so parse
  errors surface early.  Debug output is suppressed for the dry run by
  temporarily clearing fx_info->file.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  file=fx_info->file;
  fx_info->file=(FILE *) NULL;  /* silence debug() trace during the dry run */
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=file;  /* restore the debug stream */
  return(status);
}

/*
  FxEvaluateChannelExpression() evaluates fx_info->expression for the given
  channel at pixel (x,y); the result is stored in *alpha.  MagickFalse is
  returned only when an OptionError was raised during evaluation.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                             F x I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyFxThreadSet() releases every per-thread FxInfo in the set and then
  the set itself; returns NULL for convenient assignment.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
  AcquireFxThreadSet() creates one FxInfo per worker thread so the expression
  can be evaluated concurrently without shared parser state.  A leading '@'
  means "read the expression from this file".  Each FxInfo is preprocessed
  once so syntax errors surface before the pixel loop; on any failure the
  whole set is destroyed and NULL is returned.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);  /* @file form */
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);  /* partial set: tear it all down */
  return(fx_info);
}

/*
  FxImage() evaluates the expression at every pixel of a clone of the input
  image and returns the resulting image, or NULL on failure.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));  /* nothing to do */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): the extra magick_number_threads argument appears to force
     a single thread when the expression contains debug(), presumably to keep
     trace output ordered -- confirm against the macro definition. */
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows, \
      GlobExpression(fx_info[0]->expression,"debug(",MagickTrue) == 0 ? 1 : 0)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            /* this channel is copied verbatim rather than computed */
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* scale the normalized result back to the quantum range */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);  /* propagate failure as NULL */
  return(fx_image);
}
/* ==== scheduled-clause.c ==== */
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/*
 * Demonstrates OpenMP schedule(dynamic,chunk) combined with the
 * firstprivate/lastprivate clauses: every thread starts with its own
 * suma = 0 copy, so the value printed after the loop is only the partial
 * sum accumulated by whichever thread ran the sequentially-last iteration
 * (lastprivate), not the total -- that is the point of the demo.
 *
 * Usage: program <chunk>
 *   chunk: positive integer chunk size for the dynamic schedule.
 *          (schedule(dynamic,chunk) requires chunk >= 1; atoi was replaced
 *          with strtol so non-numeric or non-positive input is rejected
 *          instead of silently producing an invalid chunk size.)
 */
int main(int argc, char **argv)
{
  enum { N = 16 };            /* number of loop iterations */
  int a[N];
  int suma = 0;
  int i, chunk;
  long value;
  char *end;

  if (argc < 2)
    {
      fprintf(stderr,"\nFalta chunk \n");
      exit(-1);
    }
  /* Validate the argument: strtol reports where parsing stopped. */
  value = strtol(argv[1], &end, 10);
  if (end == argv[1] || *end != '\0' || value < 1 || value > N)
    {
      fprintf(stderr,"\nFalta chunk \n");
      exit(-1);
    }
  chunk = (int) value;
  for (i = 0; i < N; i++)
    a[i] = i;
  #pragma omp parallel for firstprivate(suma) lastprivate(suma) \
    schedule(dynamic,chunk)
  for (i = 0; i < N; i++)
    {
      suma = suma + a[i];
      printf(" thread %d suma a[%d] suma=%d \n",omp_get_thread_num(),i,suma);
    }
  /* With lastprivate, suma here is the partial sum of the last chunk only. */
  printf("Fuera de 'parallel for' suma=%d\n",suma);
  return 0;
}
/* ==== GB_subassign_03.c ==== */
//------------------------------------------------------------------------------
// GB_subassign_03: C(I,J) += scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Method 03: C(I,J) += scalar ; using S

// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: scalar
// S: constructed

// NOTE(review): nearly every operation below is a project macro from
// GB_subassign_methods.h (GB_GET_C, GB_SUBASSIGN_IXJ_SLICE, ...); the
// comments here describe intent only — the macro expansions define the
// actual variables (ntasks, nthreads, nzombies, pS, pS_end, Si, ...).

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_03
(
    GrB_Matrix C,               // matrix being modified in place
    // input:
    const GrB_Index *I,         // row index list (interpreted per Ikind)
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,         // column index list (interpreted per Jkind)
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_BinaryOp accum,   // accumulator applied to existing entries
    const void *scalar,         // scalar of type atype, added to C(I,J)
    const GrB_Type atype,
    const GrB_Matrix S,         // S = symbolic pattern of C(I,J), precomputed
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;
    // presumably these locals are consumed by the macros below — they are
    // not referenced by name in this function's visible text
    const bool C_is_hyper = C->is_hyper ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_S ;
    const int64_t *restrict Sh = S->h ;
    const int64_t Snvec = S->nvec ;
    const bool S_is_hyper = S->is_hyper ;
    GB_GET_ACCUM_SCALAR ;

    //--------------------------------------------------------------------------
    // Method 03: C(I,J) += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal; must visit all IxJ, so Omega(|I|*|J|) is required.

    // Entries in S are found and the corresponding entry in C replaced with
    // the scalar.

    // Method 01 and Method 03 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                // pS walks the pattern of S for this vector; a hit means
                // C(iC,jC) already exists (possibly as a zombie)
                bool found = (pS < pS_end) && (Si [pS] == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    // deferred to phase 2; only counted here
                    task_pending++ ;
                }
                else
                {
                    // ----[C A 1] or [X A 1]-----------------------------------
                    // both S (i,j) and A (i,j) present
                    // [C A 1]: action: ( =C+A ): apply accum
                    // [X A 1]: action: ( undelete ): zombie lives
                    GB_C_S_LOOKUP ;
                    GB_withaccum_C_A_1_scalar ;
                    GB_NEXT (S) ;
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                // same pattern walk as phase 1, but now the inserts counted
                // there are actually appended to the pending-tuple list
                bool found = (pS < pS_end) && (Si [pS] == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (scalar) ;
                }
                else
                {
                    // both S (i,j) and A (i,j) present
                    GB_NEXT (S) ;
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
plusplus-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// Data race on outLen due to ++ operation.
// Adding private (outLen) can avoid race condition. But it is wrong semantically.

// NOTE(review): this file appears to be a DataRaceBench positive test case
// ("-yes") — the race on outLen is the whole point of the benchmark, so the
// code must be left racy; only documentation is added here.

#include <stdlib.h>
#include <stdio.h>

int input[1000];
int output[1000];

int main()
{
  int i ;
  int inLen=1000 ;
  // outLen is shared by default inside the parallel for; the unsynchronized
  // read-modify-write `outLen++` below is the intentional data race.
  int outLen = 0;

  for (i=0; i<inLen; ++i)
    input[i]= i;

#pragma omp parallel for
  for (i=0; i<inLen; ++i)
  {
    // Racy: two threads may read the same outLen, so elements can be lost
    // (overwritten) and output may be compacted/misordered nondeterministically.
    output[outLen++] = input[i] ;
  }

  printf("output[500]=%d\n",output[500]);
  return 0;
}
train5.c
#define _GNU_SOURCE #include <syscall.h> #include <sched.h> #include "graph.h" #include "mainFunctions.h" #include "powerperformacetracking.h" #include "print.h" #include <stdlib.h> #include<unistd.h> #define NO_OF_ARGS 2 //#define REPEAT 25 #define REPEAT 25 long long iters[8]; struct timeval start, end; // We define all additional paramemter here void setaffinity() { } void trainaed(graph* G, int id) { printf("The train aed %d \n", id); char title[50]; sprintf(title, "trainaed_%d.csv",id); gettimeofday(&start, NULL); inittracking(title); double* aed = (double*) malloc (G->numNodes * sizeof(double)); #pragma omp parallel for for (node_t t = 0; t < G->numNodes; t ++) { aed[t] = t/ 1024; } for(int abc=0; abc < REPEAT; abc ++) { #pragma omp parallel { double d = 0.0; int final = 0; #pragma omp for schedule(dynamic,1024) for (node_t t = 0; t < G->numNodes; t ++) { for (edge_t w_idx = G->begin[t];w_idx < G->begin[t+1] ; w_idx ++) { node_t w = G->node_idx [w_idx]; d += aed[w]; final += (G->begin[w+1] - G->begin[w]); } final = final % G->numNodes; } #pragma omp atomic write aed[final] = d; } } for(int i=0; i< 10;i++) { printf("aed[%d] = %f \n", i,aed[i]); } free(aed); endtracking(); gettimeofday(&end, NULL); printTiming(ALGO_KERNEL,((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000)); } #define numTimes 7 /*** * Common entry point for all algorithms, **/ int runalgo(int argc,char** argv) { int i; setaffinity(); graph* G = readGraph(argv[1], argv[2]); for(i = 0;i< numTimes; i++) { printf("Run %d \n", i); trainaed(G,i); sleep(2); } return 0; } inline void kernel(graph *G) { }
myFunc.h
#ifndef USE_CUDA #define __device__ #endif #define restrict #define __declspec(x) // Rob Farber #include <stdlib.h> #include <string.h> #include <stdint.h> #include <malloc.h> #include <math.h> #include <omp.h> #define MIC_DEV 0 #define ALLOC alloc_if(1) free_if(0) #define FREE alloc_if(0) free_if(1) #define REUSE alloc_if(0) free_if(0) // Use a struct to pass and get data from the objective function typedef struct userData { // Data information int nExamples; __declspec(align(64)) float * restrict example; __declspec(align(64)) float * restrict param; #ifdef USE_CUDA float *d_example; float *d_param; double *d_out; #endif // Timing information int isWarmup; double timeObjFunc; int countObjFunc; double timeDataLoad; double minTime, maxTime; } userData_t; // function to measure wall clock time inline double getTime() { return(omp_get_wtime());} #pragma offload_attribute (push, target (mic)) // helper macros to index into the example array #define IN(i,nExamples,j) (i*nExamples+j) #define OUT(i,nExamples,j) ((i+N_INPUT)*nExamples+j) // Define the Sigmoid #ifdef USE_LINEAR char *desc="generated_PCA_func LINEAR()"; __device__ inline float G(float x) { return( x ) ;} #define G_ESTIMATE 0 #elif USE_TANH char *desc="generated_func tanh()"; __device__ inline float G(float x) { return( tanhf(x) ) ;} #define G_ESTIMATE 7 // estimate 7 flops for G #elif LOGISTIC char *desc="generated func logistic()"; __device__ inline float G(float x) { return( 1.f/(1.f+expf(-x)) ) ;} #define G_ESTIMATE 7 // estimate flops for G #else // Use Elliott function char *desc="generated func Eliott activation: x/(1+fabsf(x))"; __device__ inline float G(float x) { return( x/(1.f+fabsf(x)) ) ;} #define G_ESTIMATE 3 // estimate flops for G #endif // This file defines the function to be evaluated __device__ #include "fcn.h" #ifdef USE_CUDA #define N_CONCURRENT_BLOCKS (13*16) __device__ inline void atomicAdd (double *address, double value) { unsigned long long oldval, newval, readback; oldval = 
__double_as_longlong(*address); newval = __double_as_longlong(__longlong_as_double(oldval) + value); while ((readback=atomicCAS((unsigned long long *)address, oldval, newval)) != oldval) { oldval = readback; newval = __double_as_longlong(__longlong_as_double(oldval) + value); } } template <class T, class T1, unsigned int WARP_SIZE> __global__ void d_objFunc(T* d_param, T *d_example, int nExamples, T1 *out) { __shared__ T1 ssum[WARP_SIZE]; int tid = blockIdx.x*blockDim.x + threadIdx.x; if(tid==0) *out=0.f; if(threadIdx.x < WARP_SIZE) ssum[threadIdx.x] = 0.; register T1 partial=0.f; while(tid < nExamples) { T d= myFunc(tid, d_param, d_example, nExamples, NULL); partial += d*d; //partial += tid; tid += blockDim.x * gridDim.x; } // sum all the partials on each multiprocessor into shared memory ssum[threadIdx.x & (WARP_SIZE-1)] += partial; __syncthreads(); tid = blockIdx.x*blockDim.x + threadIdx.x; volatile T1 *smem = ssum; // sum all threads in each multiprocessor into a single value if(threadIdx.x < 16) smem[threadIdx.x] += smem[threadIdx.x + 16]; if(threadIdx.x < 8) smem[threadIdx.x] += smem[threadIdx.x + 8]; if(threadIdx.x < 4) smem[threadIdx.x] += smem[threadIdx.x + 4]; if(threadIdx.x < 2) smem[threadIdx.x] += smem[threadIdx.x + 2]; if(threadIdx.x < 1) smem[threadIdx.x] += smem[threadIdx.x + 1]; // each thread puts its local sum into shared memory if(threadIdx.x == 0) atomicAdd(out, smem[0]); } #endif #define N_CONCURRENT_BLOCKS (13*16) // The offload objective function double _objFunc(unsigned int n, const double * restrict x, double * restrict grad, void * restrict my_func_data) { double err; userData_t *uData = (userData_t *) my_func_data; // convert from double to float for speed for(int i=0; i < N_PARAM; i++) uData->param[i]=x[i]; int nExamples = uData->nExamples; // compiler workaround __declspec(align(64)) float * restrict example = uData->example; __declspec(align(64)) float * restrict param = uData->param; #pragma acc data copyin(param[0:N_PARAM-1]) 
pcopyin(example[0:nExamples*EXAMPLE_SIZE-1]) #pragma offload target(mic:MIC_DEV) in(param:length(N_PARAM) REUSE) \ out(err) in(example:length(0) REUSE) { err=0.; // initialize error here in case offload selected #ifdef USE_CUDA cudaError_t ret; ret=cudaMemcpy(uData->d_param, param, sizeof(float)*N_PARAM, cudaMemcpyDefault); if( ret != cudaSuccess) { fprintf(stderr,"CUDA error (cudaMemcpy param): %s\n", cudaGetErrorString(ret)); exit(-1); } d_objFunc<float,double,32><<<N_CONCURRENT_BLOCKS, 32>>>(uData->d_param, uData->d_example, nExamples,uData->d_out); ret=cudaGetLastError(); if( ret != cudaSuccess) { fprintf(stderr,"CUDA error: %s\n", cudaGetErrorString(ret)); exit(-1); } double tmp; ret = cudaMemcpy(&tmp, uData->d_out, sizeof(double), cudaMemcpyDeviceToHost); if( ret != cudaSuccess) { fprintf(stderr,"CUDA memcpy(sum): %s\n", cudaGetErrorString(ret)); exit(-1); } err=tmp; #else #pragma acc parallel loop num_gangs(13*16) vector_length(32) reduction(+:err) #pragma omp parallel for reduction(+ : err) for(int i=0; i < nExamples; i++) { float d=myFunc(i, param, example, nExamples, NULL); err += d*d; } #endif } //fprintf(stderr,"err %g\n", sqrt(err)); return sqrt(err); } #pragma offload_attribute (pop) // The optizimation library callable objective function that gathers timing information double objFunc(unsigned int n, const double * restrict x, double * restrict grad, void * restrict my_func_data) { if(grad) { fprintf(stderr,"Gradient not implemented!\n"); exit(1); } userData_t *uData = (userData_t *) my_func_data; double runTime=getTime(); double err = _objFunc(n,x,grad,my_func_data); runTime = getTime() - runTime; if(!uData->isWarmup) { // Note a maxTime of zero means this is the first call if(uData->maxTime == 0.) 
{ uData->maxTime = uData->minTime = runTime; } uData->maxTime = (uData->maxTime > runTime)?uData->maxTime:runTime; uData->minTime = (uData->minTime < runTime)?uData->minTime:runTime; uData->timeObjFunc += runTime; uData->countObjFunc++; } return( err ); } // Called to free memory and report timing information void fini(userData_t *uData) { int nThreads=0; // Intel recommended way to get the number of threads in offload mode. #pragma offload target(mic:MIC_DEV) out(nThreads) { #pragma omp parallel { #pragma omp single { nThreads = omp_get_num_threads(); } } } // Ouput some information if(!uData->isWarmup) { printf("number OMP threads %d\n", nThreads); printf("DataLoadTime %g\n", uData->timeDataLoad); printf("AveObjTime %g, countObjFunc %d, totalObjTime %g\n", uData->timeObjFunc/uData->countObjFunc, uData->countObjFunc, uData->timeObjFunc); #ifdef FLOP_ESTIMATE printf("Estimated flops in myFunc %d, estimated average GFlop/s %g\n", FLOP_ESTIMATE, (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->timeObjFunc/uData->countObjFunc)/1.e9) ); printf("Estimated maximum GFlop/s %g, minimum GFLop/s %g\n", (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->minTime)/1.e9), (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->maxTime)/1.e9) ); } #endif // free if using offload mode __declspec(align(64)) float * restrict example = uData->example;// compiler workaround __declspec(align(64)) float * restrict param = uData->param;// compiler workaround #pragma offload target(mic:MIC_DEV) in(example: length(0) FREE) in(param : length(0) FREE) {} // free on the host if(uData->example) free(uData->example); uData->example=NULL; if(uData->param) free(uData->param); uData->param=NULL; } void offloadData(userData_t *uData) { #ifdef USE_CUDA cudaError_t err; long exSize=sizeof(float)*EXAMPLE_SIZE*uData->nExamples; if( (err=cudaMalloc((void**) &uData->d_example, exSize)) != cudaSuccess) { fprintf(stderr,"CUDA error: %s\n", cudaGetErrorString(err)); exit(-1); } if( (err=cudaMalloc((void**) 
&uData->d_param, sizeof(float)*N_PARAM)) != cudaSuccess) { fprintf(stderr,"CUDA error: %s\n", cudaGetErrorString(err)); exit(-1); } if( (err=cudaMalloc((void**) &uData->d_out, sizeof(double))) != cudaSuccess) { fprintf(stderr,"CUDA error: %s\n", cudaGetErrorString(err)); exit(-1); } err=cudaMemcpy(uData->d_example, uData->example, exSize, cudaMemcpyDefault); if( err != cudaSuccess) { fprintf(stderr,"CUDA error (cudaMemcpy example): %s\n", cudaGetErrorString(err)); exit(-1); } #endif #ifdef __INTEL_OFFLOAD int nDevices =_Offload_number_of_devices(); if(nDevices == 0) { fprintf(stderr,"No devices found!\n"); exit -1; } // If necessary, perform offload transfer and allocation double startOffload=getTime(); __declspec(align(64)) float * restrict example = uData->example; // compiler workaround __declspec(align(64)) float * restrict param = uData->param; // compiler workaround int Xsiz = uData->nExamples*EXAMPLE_SIZE; // compiler workaround // Note: the in for param just allocates memory on the device #pragma offload target(mic:MIC_DEV) in(example: length(Xsiz) ALLOC) in(param : length(N_PARAM) ALLOC) {} // set data load time if using offload mode uData->timeDataLoad = getTime() - startOffload; #endif } // loads the binary file of the form: // nInput, nOutput, nExamples // Input [0] [0:nExamples] // Input [1] [0:nExamples] // ... // Output [0] [0:nExamples] // Output [1] [0:nExamples] // ... 
void init(char*filename, userData_t *uData) { FILE *fn=stdin; // check if reading from stdin if(strcmp("-", filename) != 0) fn=fopen(filename,"r"); if(!fn) { fprintf(stderr,"Cannot open %s\n",filename); exit(1); } // read the header information double startTime=getTime(); int32_t nInput, nOutput; int32_t nExamples; fread(&nInput,sizeof(int32_t), 1, fn); if(nInput != N_INPUT) { fprintf(stderr,"Number of inputs incorrect!\n"); exit(1); } fread(&nOutput,sizeof(int32_t), 1, fn); if(nOutput != N_OUTPUT) { fprintf(stderr,"Number of outputs incorrect!\n"); exit(1); } fread(&nExamples,sizeof(int32_t), 1, fn); if(nExamples <= 0) { fprintf(stderr,"Number of examples incorrect!\n"); exit(1); } uData->nExamples = nExamples; // aligned allocation of the data uData->example=(float*) memalign(64,nExamples*EXAMPLE_SIZE*sizeof(float)); if(!uData->example) { fprintf(stderr,"Not enough memory for examples!\n"); exit(1); } // aligned allocation of the on-device parameters uData->param=(float*) memalign(64,N_PARAM*sizeof(float)); if(!uData->param) { fprintf(stderr,"Not enough memory for the parameters!\n"); exit(1); } // read the data for(int exIndex=0; exIndex < uData->nExamples; exIndex++) { for(int i=0; i < nInput; i++) fread(&uData->example[IN(i,uData->nExamples, exIndex)],1, sizeof(float), fn); for(int i=0; i < nOutput; i++) fread(&uData->example[OUT(i,uData->nExamples, exIndex)],1, sizeof(float), fn); } // offload the data double startOffload=getTime(); __declspec(align(64)) float * restrict example = uData->example; // compiler workaround __declspec(align(64)) float * restrict param = uData->param; // compiler workaround int Xsiz = uData->nExamples*EXAMPLE_SIZE; // compiler workaround // Note: the in just allocates memory on the device #pragma offload target(mic:MIC_DEV) in(example: length(Xsiz) ALLOC) in(param : length(N_PARAM) ALLOC) {} #ifdef USE_CUDA offloadData(uData); #endif uData->timeDataLoad = getTime() - startTime; if(fn!=stdin) fclose(fn); }
ParallelOpenMP.h
#pragma once
#include <ATen/ATen.h>
#include <cstddef>
#include <exception>
#ifdef _OPENMP
#define INTRA_OP_PARALLEL
#include <omp.h>
#endif

namespace at {

// Run f(sub_begin, sub_end) over [begin, end) split across OpenMP threads.
// grain_size limits how finely the range is divided (0 = no limit).  The
// first exception thrown by any chunk is captured and rethrown on the
// calling thread; later exceptions are dropped (err_flag guards the slot).
// Falls back to a single serial call without OpenMP.
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f) {
  TORCH_CHECK(grain_size >= 0);
  if (begin >= end) {
    return;
  }
#ifdef _OPENMP
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
#pragma omp parallel if (!omp_in_parallel() && ((end - begin) > grain_size))
  {
    // choose number of tasks based on grain size and number of threads
    // can't use num_threads clause due to bugs in GOMP's thread pool (See #32008)
    int64_t num_threads = omp_get_num_threads();
    if (grain_size > 0) {
      num_threads = std::min(num_threads, divup((end - begin), grain_size));
    }

    int64_t tid = omp_get_thread_num();
    int64_t chunk_size = divup((end - begin), num_threads);
    int64_t begin_tid = begin + tid * chunk_size;
    if (begin_tid < end) {
      try {
        // clamp the chunk's upper bound to end for the last chunk
        f(begin_tid, std::min(end, chunk_size + begin_tid));
      } catch (...) {
        // only the first thread to fail records its exception
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
  }
  if (eptr) {
    std::rethrow_exception(eptr);
  }
#else
  f(begin, end);
#endif
}

// Parallel reduction over [begin, end): f(sub_begin, sub_end, ident) produces
// a partial result per grain-sized chunk; sf combines partials (left fold,
// in chunk order) into the final value.  Serial when already inside a
// parallel region or with a single thread.  Exception handling mirrors
// parallel_for: first exception wins and is rethrown after the loop.
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const scalar_t ident,
    const F& f,
    const SF& sf) {
  TORCH_CHECK(grain_size >= 0);
  if (begin >= end) {
    return ident;
  } else if (in_parallel_region() || get_num_threads() == 1) {
    return f(begin, end, ident);
  } else {
    const int64_t num_results = divup((end - begin), grain_size);
    std::vector<scalar_t> results(num_results);
    scalar_t* results_data = results.data();

    std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
    std::exception_ptr eptr;
#pragma omp parallel for if ((end - begin) >= grain_size)
    for (int64_t id = 0; id < num_results; id++) {
      int64_t i = begin + id * grain_size;
      try {
        // last chunk may be shorter than grain_size
        results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
      } catch (...) {
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
    if (eptr) {
      std::rethrow_exception(eptr);
    }
    scalar_t result = ident;
    for (auto partial_result : results) {
      result = sf(result, partial_result);
    }
    return result;
  }
}

} // namespace at
jacobi.pluto.h
/* -----------------------------------------------------------------------
   Copyright 2013 Pieter Ghysels, University of Antwerp
   Contact: ghyselsp@gmail.com

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
   ----------------------------------------------------------------------- */

/* Integer/real helpers used by the Pluto-generated tiled loop nest below. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define pmax(x,y) ((x) > (y)? (x) : (y))
#define pmin(x,y) ((x) < (y)? (x) : (y))

// cannot be 8 or below
#ifndef TS
#define TS 32
#endif
#ifndef T3
#define T3 64
#endif

/*
 * nu sweeps of weighted (omega = 4/5) Jacobi relaxation on the 2D
 * 5-point Laplacian: new = old - DinvXomega*((4*old - N - S - E - W)/h^2 - f).
 *
 * The time loop is tiled (time-skewed) by the Pluto polyhedral compiler:
 * t4 is the sweep index, t5/t6 walk the spatial tile, and even/odd sweeps
 * ping-pong between buffers a (u) and w (tmp).  The loop bounds are
 * machine-generated — do not hand-edit them.
 *
 * If nu is odd the final result ends in tmp, so the u/tmp planes are
 * swapped at the end to leave the answer in u.
 */
void jacobi( GRID* u, GRID* f, GRID* tmp, int nu )
{
  //printf("Tile sizes: %d %d %d\n", TS, TS, T3);
  int i, j, k;                      /* unused; left over from the generator */
  int N = u->n;
  int lda = u->lda;                 /* row stride of the 2D grids */
  double hh = u->h*u->h;
  double invhh = 1.0 / hh;
  double DinvXomega = hh/4.0 * 4.0/5.0;  /* D^{-1} * omega, omega = 4/5 */
  double* w = &(tmp->p[0][0]);
  double* a = &(u->p[0][0]);
  double* b = &(f->p[0][0]);
#ifdef USE_MM_ALLOC
  __assume_aligned(w,64);
  __assume_aligned(a,64);
  __assume_aligned(b,64);
#endif
  if ((N >= 1) && (nu >= 1)) {
    for (int t1=-1;t1<=floord(nu-1,TS/2);t1++) {
      int lbp=pmax(ceild(t1,2),ceild(TS*t1-nu+2,TS));
      int ubp=pmin(floord(nu+N-1,TS),floord((TS/2)*t1+N+(TS/2-1),TS));
      /* tiles along t2 are independent within a t1 wavefront */
#pragma omp parallel for
      for (int t2=lbp;t2<=ubp;t2++) {
        for (int t3=pmax(pmax(0,ceild(t1-1,2)),ceild(TS*t2-N-(T3-2),T3));t3<=pmin(pmin(floord(nu+N-1,T3),floord((TS/2)*t1+N+(TS-1),T3)),floord(TS*t2+N+(TS-2),T3));t3++) {
          for (int t4=pmax(pmax(pmax(pmax(0,(TS/2)*t1),TS*t2-N),T3*t3-N),TS*t1-TS*t2+1);t4<=pmin(pmin(pmin(pmin(nu-1,(TS/2)*t1+(TS-1)),TS*t2+(TS-2)),T3*t3+(T3-2)),TS*t1-TS*t2+N+(TS-1));t4++) {
#pragma loop_count min(1),max(TS),avg(TS/2)
            for (int t5=pmax(pmax(TS*t2,t4+1),-TS*t1+TS*t2+2*t4-(TS-1));t5<=pmin(pmin(TS*t2+(TS-1),t4+N),-TS*t1+TS*t2+2*t4);t5++) {
              /* flat-index window for this row; neighbors kept as running
                 offsets (left/right/up/below) to help vectorization */
              int lbv=(-t4+t5)*lda-t4+pmax(T3*t3,t4+1);
              int ubv=(-t4+t5)*lda-t4+pmin(T3*t3+(T3-1),t4+N);
              int t6l=lbv-1;
              int t6r=lbv+1;
              int t6u=lbv+lda;
              int t6b=lbv-lda;
              if (t4%2==0) {
                /* even sweep: read a (u), write w (tmp) */
#pragma loop_count min(1),max(TS),avg((TS/2))
#pragma ivdep
#pragma vector always
                for (int t6=lbv;t6<=ubv;t6++) {
                  w[t6]=a[t6]-DinvXomega*((4.0*a[t6]-a[t6b]-a[t6l]-a[t6u]-a[t6r])*invhh-b[t6]);
                  t6l++; t6r++; t6u++; t6b++;
                }
              } else {
                /* odd sweep: read w (tmp), write a (u) */
#pragma loop_count min(1),max(TS),avg((TS/2))
#pragma ivdep
#pragma vector always
                for (int t6=lbv;t6<=ubv;t6++) {
                  a[t6]=w[t6]-DinvXomega*((4.0*w[t6]-w[t6b]-w[t6l]-w[t6u]-w[t6r])*invhh-b[t6]);
                  t6l++; t6r++; t6u++; t6b++;
                }
              }
            }
          }
        }
      }
    }
  }
  /* odd number of sweeps: latest data sits in tmp — swap planes so the
     caller always finds the result in u */
  if (nu%2==1) {
    double** t = u->p;
    u->p = tmp->p;
    tmp->p = t;
  }
}
randomized_rm.h
// -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_RANDOMIZED_RM_H_
#define CORE_RANDOMIZED_RM_H_

#include <algorithm>
#include "core/resource_manager.h"
#ifdef LINUX
#include <parallel/algorithm>
#endif  // LINUX

namespace bdm {

/// Resource-manager decorator that randomizes agent order at the end of
/// every simulation iteration, to avoid artifacts from a fixed iteration
/// order in the base manager TBaseRm.
template <typename TBaseRm>
class RandomizedRm : public TBaseRm {
 public:
  /// ROOT I/O constructor (required for serialization); does no setup.
  explicit RandomizedRm(TRootIOCtor* r) {}
  RandomizedRm();
  virtual ~RandomizedRm();

  /// Shuffles each per-NUMA-node(?) agent container and rebuilds the
  /// uid -> handle map — TODO confirm agents_ is partitioned per NUMA node.
  void EndOfIteration() override;

 protected:
  BDM_CLASS_DEF_NV(RandomizedRm, 1);
};

// -----------------------------------------------------------------------------
template <typename TBaseRm>
RandomizedRm<TBaseRm>::RandomizedRm() {}

// -----------------------------------------------------------------------------
template <typename TBaseRm>
RandomizedRm<TBaseRm>::~RandomizedRm() {}

/// Adapter exposing the simulation Random as a C++ UniformRandomBitGenerator
/// so it can drive std::shuffle.
struct Ubrng {
  using result_type = uint32_t;
  Random* random;
  Ubrng(Random* random) : random(random) {}
  static constexpr result_type min() { return 0; }
  static constexpr result_type max() {
    return std::numeric_limits<result_type>::max();
  }
  result_type operator()() {
    return random->Integer(std::numeric_limits<result_type>::max());
  }
};

// -----------------------------------------------------------------------------
template <typename TBaseRm>
void RandomizedRm<TBaseRm>::EndOfIteration() {
  TBaseRm::EndOfIteration();
  // shuffle: one container per loop index, shuffled independently.
  // schedule(static, 1) — presumably to pin each container to one thread;
  // confirm intent against the NUMA layout.
#pragma omp parallel for schedule(static, 1)
  for (uint64_t n = 0; n < this->agents_.size(); ++n) {
#ifdef LINUX
    // NOTE(review): __gnu_parallel::random_shuffle is a deprecated
    // libstdc++ extension (random_shuffle was removed in C++17) and does
    // not use the simulation's Random — reproducibility differs from the
    // non-Linux branch.
    __gnu_parallel::random_shuffle(this->agents_[n].begin(),
                                   this->agents_[n].end());
#else
    auto* random = Simulation::GetActive()->GetRandom();
    std::shuffle(this->agents_[n].begin(), this->agents_[n].end(),
                 Ubrng(random));
#endif  // LINUX
  }
  // update uid_ah_map_: handles changed because positions changed.
  auto update_agent_map = L2F([this](Agent* a, AgentHandle ah) {
    this->uid_ah_map_.Insert(a->GetUid(), ah);
  });
  TBaseRm::ForEachAgentParallel(update_agent_map);
}

}  // namespace bdm

#endif  // CORE_RANDOMIZED_RM_H_
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
% % */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { MagickRealType center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { MagickRealType tau; ssize_t left, right; MagickRealType mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { MagickRealType tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static MagickRealType OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *), ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const MagickRealType cluster_threshold, % const MagickRealType weighting_exponent, % const MagickBooleanType verbose) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. 
% */ static MagickBooleanType Classify(Image *image,short **extrema, const MagickRealType cluster_threshold, const MagickRealType weighting_exponent,const MagickBooleanType verbose) { #define SegmentImageTag "Segment/Image" CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExceptionInfo *exception; ExtentPacket blue, green, red; MagickOffsetType progress; MagickRealType *free_squares; MagickStatusType status; register ssize_t i; register MagickRealType *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) ResetMagickMemory(&red,0,sizeof(red)); (void) ResetMagickMemory(&green,0,sizeof(green)); (void) ResetMagickMemory(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. 
*/ status=MagickTrue; count=0; progress=0; exception=(&image->exception); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. */ count++; cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p)); cluster->green.center+=(MagickRealType) ScaleQuantumToChar(GetPixelGreen(p)); cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p)); cluster->count++; break; } p++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_Classify) #endif proceed=SetImageProgress(image,SegmentImageTag,progress++, 2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. 
*/ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(stdout,"===================\n\n"); (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. */ (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. 
*/ (void) FormatLocaleFile(stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(stdout,"\n"); } if (number_clusters > 256) ThrowBinaryException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. */ squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares)); if (squares == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); squares+=255; for (i=(-255); i <= 255; i++) squares[i]=(MagickRealType) i*(MagickRealType) i; /* Allocate image colormap. */ if (AcquireImageColormap(image,number_clusters) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); i=0; for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) (cluster->red.center+0.5)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) (cluster->green.center+0.5)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) (cluster->blue.center+0.5)); i++; } /* Do course grain classes. 
*/ exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Cluster *cluster; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(indexes+x,0); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { if (((ssize_t) ScaleQuantumToChar(q->red) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->red) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->green) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->green) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->blue) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->blue) <= (cluster->blue.right+SafeMargin))) { /* Classify this pixel. */ SetPixelIndex(indexes+x,cluster->id); break; } } if (cluster == (Cluster *) NULL) { MagickRealType distance_squared, local_minima, numerator, ratio, sum; register ssize_t j, k; /* Compute fuzzy membership. 
*/ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)- (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->green)- (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->blue)- (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)- (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->green)- (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->blue)- (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. */ local_minima=1.0/sum; SetPixelIndex(indexes+x,j); } } } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_Classify) #endif proceed=SetImageProgress(image,SegmentImageTag,progress++, 2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image); /* Relinquish resources. 
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;  /* undo the +255 bias applied when the table was built */
  free_squares=squares;
  free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e   C r o s s i n g s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings.  Walk the scale-space fingerprints from the
    coarsest scale down; each crossing at scale i is snapped to a compatible
    crossing position found at the next finer scale i+1.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the property
        that there are an even number of crossings between intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);  /* k may be -1 when j == 0 */
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j (at the current scale i).
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;  /* clamp: the search can leave k == -1 when j == 0 */
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);  /* -1 means no compatible position found yet */
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing from j to the chosen position (or drop it when no
        parity-preserving position exists).
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e   R e g i o n                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represent the extends
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.  extents->index is NOT reset here: the
    caller iterates this function to enumerate successive regions, resuming
    the scan where the previous call stopped.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima): a positive extrema entry marks a peak.
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima): a negative extrema entry marks a valley.
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e r i v a t i v e   H i s t o g r a m                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const MagickRealType *histogram,
%        MagickRealType *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of MagickRealTypes is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation
    (one-sided three-point difference formulas at bins 0 and 255).
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing for interior bins.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
% % The format of the GetImageDynamicThreshold method is: % % MagickBooleanType GetImageDynamicThreshold(const Image *image, % const double cluster_threshold,const double smooth_threshold, % MagickPixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o pixel: return the dynamic threshold here. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image, const double cluster_threshold,const double smooth_threshold, MagickPixelPacket *pixel,ExceptionInfo *exception) { Cluster *background, *cluster, *object, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickBooleanType proceed; MagickRealType threshold; register const PixelPacket *p; register ssize_t i, x; short *extrema[MaxDimension]; ssize_t count, *histogram[MaxDimension], y; /* Allocate histogram and extrema. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); GetMagickPixelPacket(image,pixel); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } } /* Initialize histogram. */ InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]); /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) ResetMagickMemory(&red,0,sizeof(red)); (void) ResetMagickMemory(&green,0,sizeof(green)); (void) ResetMagickMemory(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. 
*/ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p)); cluster->green.center+=(MagickRealType) ScaleQuantumToChar(GetPixelGreen(p)); cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p)); cluster->count++; break; } p++; } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. 
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. % % The format of the InitializeHistogram method is: % % InitializeHistogram(const Image *image,ssize_t **histogram) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % */ static void InitializeHistogram(const Image *image,ssize_t **histogram, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Initialize histogram. 
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;  /* row unavailable; leave the histogram partially filled */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Each channel is quantized to 8 bits to index the 256-bin histogram. */
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e   I n t e r v a l   T r e e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/

/*
  Append every leaf (childless) node of the tree rooted at `node' to `list'.
  NOTE(review): no bound check against TreeLength is performed here; the
  callers appear to rely on the 256-bin histogram limiting the leaf count —
  verify before reuse.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/*
  Set each node's mean_stability to the average stability of its children
  (0.0 for leaves), recursing over siblings and children.
*/
static void MeanStability(IntervalTree *node)
{
  register IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      register ssize_t
        count;

      register MagickRealType
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(MagickRealType) count;  /* count >= 1 here */
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/*
  Set each node's stability to the tau gap between the node and its first
  child (0.0 for leaves), recursing over siblings and children.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /*
    i starts at -1 so the first pass splits the root using the crossings of
    zero_crossing[0] (the coarsest scale).
  */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      /*
        Each zero crossing strictly inside [head->left, head->right] splits
        the leaf into a new child interval at the finer scale i+1.
      */
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        Close the final child interval (left .. head->right) when at least
        one split occurred.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p t i m a l T a u                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%    MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
%      const double min_tau,const double delta_tau,
%      const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
*/

/*
  Collect "active" nodes: a node is active when its stability is at least the
  mean stability of its children; otherwise its siblings and children are
  searched instead.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
    }
  else
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
    }
}

/*
  Release an interval tree: post-order traversal over siblings and children.
*/
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  node=(IntervalTree *) RelinquishMagickMemory(node);
}

static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  MagickRealType
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list; +2 leaves room for the final partial step
    and the unsmoothed-histogram entry appended after the tau sweep.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);  /* -1.0 marks an unused slot */
  /*
    Initialize zero crossing list.
*/ derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(MagickRealType *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(MagickRealType) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(MagickRealType *) RelinquishMagickMemory(derivative); second_derivative=(MagickRealType *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. 
*/ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. */ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(MagickRealType) number_nodes; /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const MagickRealType tau, % MagickRealType *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. 
%
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  double
    alpha,
    beta,
    *gamma,
    sum;

  register ssize_t
    u,
    x;

  /*
    Convolve the 256-bin histogram with a Gaussian of standard deviation tau.
    The kernel gamma[] is indexed by |x-u|, so only 256 weights are needed.
  */
  gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma));
  if (gamma == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  alpha=1.0/(tau*sqrt(2.0*MagickPI));  /* Gaussian normalization factor */
  beta=(-1.0/(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    gamma[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    gamma[x]=exp((double) beta*x*x);
    if (gamma[x] < MagickEpsilon)
      break;  /* remaining weights stay 0.0 (pre-cleared above) */
  }
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=(MagickRealType) (alpha*sum);
  }
  gamma=(double *) RelinquishMagickMemory(gamma);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S e g m e n t I m a g e                                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema (one 256-entry array per color dimension).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* unwind the allocations that already succeeded */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram; locate the peaks/valleys of each channel via the
    scale-space filter.  OptimalTau()'s return value (average tau) is unused
    here -- only the extrema side effect matters.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace);
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique, then restore the original
    colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  (void) TransformImageColorspace(image,previous_colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  Z e r o C r o s s H i s t o g r a m                                        %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(MagickRealType *second_derivative,
%        const MagickRealType smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of MagickRealTypes representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.  Note: this clamps
    values in (-smooth_threshold, smooth_threshold) -- the input array is
    modified in place.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: a crossing is recorded at the bin where the sign of
    the (thresholded) second derivative flips; parity tracks the last nonzero
    sign seen.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);  /* positive-to-negative transition */
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;  /* negative-to-positive transition */
          parity=(-1);
        }
  }
}
/* segment.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ''fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ''classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
%
%
*/

#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"

/*
  Define declarations.
*/
#define MaxDimension  3
#define DeltaTau  0.5f
#if defined(FastClassify)
#define WeightingExponent  2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent  2.5
/*
  NOTE(review): the macro argument is unparenthesized and the expansion ends
  with a semicolon, so SegmentPower() is only safe as a complete statement
  (as in `sum+=SegmentPower(ratio);').  It also references a local variable
  `weighting_exponent' that must be in scope at the expansion site.
*/
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau  5.2f

/*
  Typedef declarations.
*/
typedef struct _ExtentPacket
{
  /* running sum (later mean) of channel values inside the extent */
  double
    center;

  /* scan cursor and inclusive [left, right] bounds of the region */
  ssize_t
    index,
    left,
    right;
} ExtentPacket;

typedef struct _Cluster
{
  struct _Cluster
    *next;

  /* per-channel extents of this cluster's hexahedron */
  ExtentPacket
    red,
    green,
    blue;

  /* number of pixels assigned, and the colormap id assigned later */
  ssize_t
    count,
    id;
} Cluster;

typedef struct _IntervalTree
{
  double
    tau;

  ssize_t
    left,
    right;

  double
    mean_stability,
    stability;

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

typedef struct _ZeroCrossing
{
  double
    tau,
    histogram[256];

  short
    crossings[256];
} ZeroCrossing;

/*
  Constant declarations.
*/
static const int
  Blue = 2,
  Green = 1,
  Red = 0,
  SafeMargin = 3,
  TreeLength = 600;

/*
  Method prototypes.
*/
static double OptimalTau(const ssize_t *,const double,const double,const double,
  const double,short *);

static ssize_t DefineRegion(const short *,ExtentPacket *);

static void FreeNodes(IntervalTree *),
  InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
  ScaleSpace(const ssize_t *,const double,double *),
  ZeroCrossHistogram(double *,const double,short *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  C l a s s i f y                                                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Classify() defines one or more classes.  Each pixel is thresholded to
%  determine which class it belongs to.  If the class is not identified it is
%  assigned to the closest class based on the fuzzy c-Means technique.
%
%  The format of the Classify method is:
%
%      MagickBooleanType Classify(Image *image,short **extrema,
%        const double cluster_threshold,const double weighting_exponent,
%        const MagickBooleanType verbose,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o cluster_threshold:  This double represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o weighting_exponent: Specifies the membership weighting exponent.
%
%    o verbose:  A value greater than zero prints detailed information about
%      the identified classes.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const double cluster_threshold,const double weighting_exponent,
  const MagickBooleanType verbose,ExceptionInfo *exception)
{
#define SegmentImageTag  "Segment/Image"
/*
  Error-exit macro: frees the cluster list and the (possibly re-based)
  squares table before throwing.  Assumes cluster/next_cluster/head/squares/
  free_squares locals are in scope.
*/
#define ThrowClassifyException(severity,tag,label) \
{\
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \
  { \
    next_cluster=cluster->next; \
    cluster=(Cluster *) RelinquishMagickMemory(cluster); \
  } \
  if (squares != (double *) NULL) \
    { \
      squares-=255; \
      free_squares=squares; \
      free_squares=(double *) RelinquishMagickMemory(free_squares); \
    } \
  ThrowBinaryException(severity,tag,label); \
}

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  double
    *free_squares;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  MagickStatusType
    status;

  ssize_t
    i;

  double
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: the Cartesian product of the peak regions found in the
    red, green, and blue extrema arrays, one cluster per (red x green x blue)
    region triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  squares=(double *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireQuantumMemory(1,
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        (void) memset(cluster,0,sizeof(*cluster));
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      (void) memset(cluster,0,sizeof(*cluster));
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel belongs to the first cluster
    whose hexahedron (widened by SafeMargin on every side) contains it.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      pixel.red=(double) ScaleQuantumToChar(GetPixelRed(image,p));
      pixel.green=(double) ScaleQuantumToChar(GetPixelGreen(image,p));
      pixel.blue=(double) ScaleQuantumToChar(GetPixelBlue(image,p));
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) &&
            (pixel.red <= (double) (cluster->red.right+SafeMargin)) &&
            (pixel.green >= (double) (cluster->green.left-SafeMargin)) &&
            (pixel.green <= (double) (cluster->green.right+SafeMargin)) &&
            (pixel.blue >= (double) (cluster->blue.left-SafeMargin)) &&
            (pixel.blue <= (double) (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel; accumulate channel sums so the cluster center
              can be computed as a mean later.
            */
            count++;
            cluster->red.center+=pixel.red;
            cluster->green.center+=pixel.green;
            cluster->blue.center+=pixel.blue;
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.  Note the
    threshold is evaluated against `count', the number of surviving clusters
    so far, times cluster_threshold percent.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated sums to mean centers and
          assign a sequential id.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster (unlink from the list, keeping head consistent).
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents:        (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g  %.20g-%.20g  %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values:        (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g  %g  %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowClassifyException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: squares is re-based to the middle of a
    513-entry table so it can be indexed with differences in [-255, 255].
  */
  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (double *) NULL)
    ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(double) i*(double) i;
  /*
    Allocate image colormap.
  */
  if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
    ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse-grain classification: threshold each pixel against the cluster
    hexahedra; unclassified pixels fall through to fuzzy membership below.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *c;

    const PixelInfo
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      SetPixelIndex(image,(Quantum) 0,q);
      pixel.red=(double) ScaleQuantumToChar(GetPixelRed(image,q));
      pixel.green=(double) ScaleQuantumToChar(GetPixelGreen(image,q));
      pixel.blue=(double) ScaleQuantumToChar(GetPixelBlue(image,q));
      for (c=head; c != (Cluster *) NULL; c=c->next)
      {
        if ((pixel.red >= (double) (c->red.left-SafeMargin)) &&
            (pixel.red <= (double) (c->red.right+SafeMargin)) &&
            (pixel.green >= (double) (c->green.left-SafeMargin)) &&
            (pixel.green <= (double) (c->green.right+SafeMargin)) &&
            (pixel.blue >= (double) (c->blue.left-SafeMargin)) &&
            (pixel.blue <= (double) (c->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(image,(Quantum) c->id,q);
            break;
          }
      }
      if (c == (Cluster *) NULL)
        {
          double
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          ssize_t
            j,
            k;

          /*
            Compute fuzzy membership.
*/ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared= squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+ squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+ squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared= squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+ squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+ squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. */ local_minima=1.0/sum; SetPixelIndex(image,(Quantum) j,q); } } } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image,exception); /* Relinquish resources. */ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } squares-=255; free_squares=squares; free_squares=(double *) RelinquishMagickMemory(free_squares); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C r o s s i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCrossings() guarantees that an even number of zero crossings % always lie between two crossings. 
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings: walk from the coarsest scale (highest index)
    down, snapping each crossing at scale i to a compatible position taken
    from the next coarser scale i+1.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the property
        that there are an even number of crossings between intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      /* NOTE(review): the loop above stops at k==0, so k < 0 is unreachable
         and this guard is dead code (kept for fidelity). */
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing to the chosen position (or drop it when no
        parity-preserving position exists).
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  D e f i n e R e g i o n                                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represent the extends
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.  extents->index is NOT reset: the caller's
    cursor resumes where the previous region ended, so repeated calls
    enumerate successive regions.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  D e r i v a t i v e H i s t o g r a m                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const double *histogram,
%        double *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of doubles representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of doubles is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation
    (one-sided three-point differences at bins 0 and 255).
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing for interior bins.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cluster_threshold:  This double represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* NOTE(review): extrema is sized with sizeof(**histogram) (ssize_t)
       rather than sizeof(**extrema) (short); this over-allocates but is
       otherwise harmless -- confirm against upstream before changing. */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Unwind the buffers already acquired before failing.
        */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  /*
    Locate smoothed histogram extrema per channel; a zero smoothing
    threshold falls back to 1.0.
  */
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireQuantumMemory(1,
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /* NOTE(review): on this failure path the histogram/extrema
               buffers and any clusters already linked are leaked --
               flagged for review, not changed here. */
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        b,
        g,
        r;

      r=(double) ScaleQuantumToChar(GetPixelRed(image,p));
      g=(double) ScaleQuantumToChar(GetPixelGreen(image,p));
      b=(double) ScaleQuantumToChar(GetPixelBlue(image,p));
      /* A pixel is assigned to the first cluster whose (margin-padded)
         RGB extents contain it. */
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if ((r >= (double) (cluster->red.left-SafeMargin)) &&
            (r <= (double) (cluster->red.right+SafeMargin)) &&
            (g >= (double) (cluster->green.left-SafeMargin)) &&
            (g <= (double) (cluster->green.right+SafeMargin)) &&
            (b >= (double) (cluster->blue.left-SafeMargin)) &&
            (b <= (double) (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=r;
            cluster->green.center+=g;
            cluster->blue.center+=b;
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  /* NOTE(review): 'count' is re-used below as the number of retained
     clusters, so the cluster_threshold test compares against the running
     retained-cluster count, not the pixel total accumulated above --
     confirm this is intended before changing. */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster as "object" and the largest as "background".
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        Threshold each channel at the midpoint between the background and
        object cluster centers.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e   H i s t o g r a m                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeHistogram() computes the histogram for an image.
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram)
%
%  A description of each parameter follows.
%
%    o image:  Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram:  Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  const Quantum
    *pixels;

  ssize_t
    bin,
    column,
    row;

  /*
    Zero the per-channel frequency tables.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  /*
    Accumulate one count per pixel per channel, row by row; stop early if a
    row of pixels cannot be fetched.
  */
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    pixels=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (pixels == (const Quantum *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,
        pixels))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,
        pixels))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,
        pixels))]++;
      pixels+=GetPixelChannels(image);
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e   I n t e r v a l   T r e e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/

/*
  InitializeList() appends every leaf (childless) node of the subtree rooted
  at 'node' to 'list', advancing *number_nodes; siblings are visited before
  children.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/*
  MeanStability() sets each node's mean_stability to the average stability of
  its immediate children (0.0 for leaves), recursing over siblings and
  children.
*/
static void MeanStability(IntervalTree *node)
{
  IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      ssize_t
        count;

      double
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      /* count >= 1 here because the child list is non-empty. */
      node->mean_stability=sum/(double) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/*
  Stability() sets each node's stability to the difference between its tau
  and its first child's tau (0.0 for leaves).
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so zero_crossing[i+1] visits index 0 first. */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /* The first split becomes the child; later splits chain on as
               siblings. */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      if (left != head->left)
        {
          /* Close the final interval up to the parent's right edge. */
          node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p t i m a l   T a u                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%      double OptimalTau(const ssize_t *histogram,const double max_tau,
%        const double min_tau,const double delta_tau,
%        const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
*/

/*
  ActiveNodes() collects the "active" nodes of the interval tree: a node
  whose stability is at least the mean stability of its children is taken
  as-is (its subtree is not descended); otherwise its siblings and children
  are searched instead.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
    }
  else
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
    }
}

/*
  FreeNodes() releases an interval-tree subtree: siblings first, then
  children, then the node itself.
*/
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  node=(IntervalTree *) RelinquishMagickMemory(node);
}

static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus one for the
    unsmoothed histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list.
  */
  derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(double *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  /* Sweep tau from coarse (max_tau) to fine (min_tau). */
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *) RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes:  Stability is greater (or equal) to the mean stability
    of its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    for (x=node->left; x <= node->right; x++)
    {
      /* Index 0 is encoded as 256 so a negated valley index remains
         distinguishable from "no extremum" (0). */
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau*=PerceptibleReciprocal((double) number_nodes);
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S c a l e   S p a c e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleSpace() performs a scale-space filter on the 1D histogram.
%
%  The format of the ScaleSpace method is:
%
%      ScaleSpace(const ssize_t *histogram,const double tau,
%        double *scale_histogram)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of doubles representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    *kernel,
    norm,
    shape,
    weighted_sum;

  ssize_t
    i,
    j;

  /*
    Build a Gaussian kernel of width tau; entries smaller than
    MagickEpsilon are left at zero.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  norm=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  shape=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (i=0; i <= 255; i++)
    kernel[i]=0.0;
  for (i=0; i <= 255; i++)
  {
    kernel[i]=exp((double) shape*i*i);
    if (kernel[i] < MagickEpsilon)
      break;
  }
  /*
    Convolve the histogram with the kernel (symmetric in |i-j|).
  */
  for (i=0; i <= 255; i++)
  {
    weighted_sum=0.0;
    for (j=0; j <= 255; j++)
      weighted_sum+=(double) histogram[j]*kernel[MagickAbsoluteValue(i-j)];
    scale_histogram[i]=norm*weighted_sum;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e g m e n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Unwind the buffers acquired so far before failing.
        */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  /*
    Compute per-channel extrema; a zero smoothing threshold falls back to
    1.0.
  */
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  /* Restore the caller's colorspace before returning. */
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Z e r o   C r o s s   H i s t o g r a m                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(double *second_derivative,
%        const double smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of doubles representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  /* Note: second_derivative is clamped in place, a deliberate side effect. */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
/* bt.c */
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - BT This benchmark is an OpenMP C version of the NPB BT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: R. Van der Wijngaart T. Harris M. Yarrow OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" /* global variables */ #include "header.h" /* function declarations */ static void add(void); static void adi(void); static void error_norm(double rms[5]); static void rhs_norm(double rms[5]); static void exact_rhs(void); static void exact_solution(double xi, double eta, double zeta, double dtemp[5]); static void initialize(void); static void lhsinit(void); static void lhsx(void); static void lhsy(void); static void lhsz(void); static void compute_rhs(void); static void set_constants(void); static void verify(int no_time_steps, char *cclass, boolean *verified); static void x_solve(void); static void x_backsubstitute(void); static void x_solve_cell(void); static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]); static void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]); static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]); static void binvrhs(double lhs[5][5], 
double r[5]); static void y_solve(void); static void y_backsubstitute(void); static void y_solve_cell(void); static void z_solve(void); static void z_backsubstitute(void); static void z_solve_cell(void); /*-------------------------------------------------------------------- program BT c-------------------------------------------------------------------*/ int main(int argc, char **argv) { int niter, step, n3; int nthreads = 1; double navg, mflops; double tmax; boolean verified; char cclass; FILE *fp; /*-------------------------------------------------------------------- c Root node reads input file (if it exists) else takes c defaults from parameters c-------------------------------------------------------------------*/ printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - BT Benchmark\n\n"); fp = fopen("inputbt.data", "r"); if (fp != NULL) { printf(" Reading from input file inputbt.data"); fscanf(fp, "%d", &niter); while (fgetc(fp) != '\n'); fscanf(fp, "%lg", &dt); while (fgetc(fp) != '\n'); fscanf(fp, "%d%d%d", &grid_points[0], &grid_points[1], &grid_points[2]); fclose(fp); } else { printf(" No input file inputbt.data. 
Using compiled defaults\n"); niter = NITER_DEFAULT; dt = DT_DEFAULT; grid_points[0] = PROBLEM_SIZE; grid_points[1] = PROBLEM_SIZE; grid_points[2] = PROBLEM_SIZE; } printf(" Size: %3dx%3dx%3d\n", grid_points[0], grid_points[1], grid_points[2]); printf(" Iterations: %3d dt: %10.6f\n", niter, dt); if (grid_points[0] > IMAX || grid_points[1] > JMAX || grid_points[2] > KMAX) { printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]); printf(" Problem size too big for compiled array sizes\n"); exit(1); } set_constants(); #pragma omp parallel { initialize(); lhsinit(); exact_rhs(); /*-------------------------------------------------------------------- c do one time step to touch all code, and reinitialize c-------------------------------------------------------------------*/ adi(); initialize(); } /* end parallel */ timer_clear(1); timer_start(1); #pragma omp parallel firstprivate(niter) private(step) { for (step = 1; step <= niter; step++) { if (step%20 == 0 || step == 1) { #pragma omp master printf(" Time step %4d\n", step); } adi(); } #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop(1); tmax = timer_read(1); verify(niter, &cclass, &verified); n3 = grid_points[0]*grid_points[1]*grid_points[2]; navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0; if ( fabs(tmax-0.0)>1.0e-5 ) { //if ( tmax != 0.0 ) { mflops = 1.0e-6*(double)niter* (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax; } else { mflops = 0.0; } c_print_results("BT", cclass, grid_points[0], grid_points[1], grid_points[2], niter, nthreads, tmax, mflops, " floating point", verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, "(none)"); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void add(void) { /*-------------------------------------------------------------------- c addition of update to the 
vector u c-------------------------------------------------------------------*/ int i, j, k, m; #pragma omp for private(j,k,m) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m]; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void adi(void) { compute_rhs(); x_solve(); y_solve(); z_solve(); add(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void error_norm(double rms[5]) { /*-------------------------------------------------------------------- c this function computes the norm of the difference between the c computed solution and the exact solution c-------------------------------------------------------------------*/ int i, j, k, m, d; double xi, eta, zeta, u_exact[5], add; for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, u_exact); for (m = 0; m < 5; m++) { add = u[i][j][k][m] - u_exact[m]; rms[m] = rms[m] + add*add; } } } } for (m = 0; m < 5; m++) { for (d = 0; d <= 2; d++) { rms[m] = rms[m] / (double)(grid_points[d]-2); } rms[m] = sqrt(rms[m]); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void rhs_norm(double rms[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ int i, j, k, d, m; double add; for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < 
grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          add = rhs[i][j][k][m];
          rms[m] = rms[m] + add*add;
        }
      }
    }
  }

  /* Normalize each component by the number of interior points in every
     direction, then take the square root to obtain an RMS norm. */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] = rms[m] / (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}

/*--------------------------------------------------------------------
c compute the right hand side based on the exact solution: evaluates
c the analytic solution on the grid, forms flux differences plus a
c fourth-order artificial dissipation in each of the three coordinate
c directions, and accumulates the (negated) result into forcing[].
c Scratch arrays ue/buf/cuf/q are file-scope work storage reused for
c each pencil of points.
c-------------------------------------------------------------------*/
static void exact_rhs(void) {

  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c initialize the forcing term to zero everywhere
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          forcing[i][j][k][m] = 0.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c xi-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for private(k,i,m)
  for (j = 1; j < grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      /* evaluate the exact solution along the whole i-pencil;
         buf holds velocities (momentum/density), q the dynamic pressure term */
      for (i = 0; i < grid_points[0]; i++) {
        xi = (double)i * dnxm1;

        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[i][m] = dtemp[m];
        }

        dtpp = 1.0 / dtemp[0];

        for (m = 1; m <= 4; m++) {
          buf[i][m] = dtpp * dtemp[m];
        }

        cuf[i]    = buf[i][1] * buf[i][1];
        buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] +
          buf[i][3] * buf[i][3];
        q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
                    buf[i][3]*ue[i][3]);
      }

      /* central flux differences + second differences at interior points */
      for (i = 1; i < grid_points[0]-1; i++) {
        im1 = i-1;
        ip1 = i+1;

        forcing[i][j][k][0] = forcing[i][j][k][0] -
          tx2*(ue[ip1][1]-ue[im1][1])+
          dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

        forcing[i][j][k][1] = forcing[i][j][k][1] -
          tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
                 (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
          xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
          dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);

        forcing[i][j][k][2] = forcing[i][j][k][2] -
          tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
          xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
          dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);

        forcing[i][j][k][3] = forcing[i][j][k][3] -
          tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
          xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
          dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

        forcing[i][j][k][4] = forcing[i][j][k][4] -
          tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
               buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
          0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
          dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation: one-sided stencils at the two points next
c to each boundary, full five-point stencil in the interior.
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
        i = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
           4.0*ue[i+1][m] + ue[i+2][m]);
      }

      for (m = 0; m < 5; m++) {
        for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[i-2][m] - 4.0*ue[i-1][m] +
             6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] +
           6.0*ue[i][m] - 4.0*ue[i+1][m]);
        i = grid_points[0]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c eta-direction flux differences (role of component 2 mirrors the role
c component 1 played in the xi sweep)
c-------------------------------------------------------------------*/
#pragma omp for private(k,j,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      for (j = 0; j < grid_points[1]; j++) {
        eta = (double)j * dnym1;

        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[j][m] = dtemp[m];
        }

        dtpp = 1.0/dtemp[0];

        for (m = 1; m <= 4; m++) {
          buf[j][m] = dtpp * dtemp[m];
        }

        cuf[j]    = buf[j][2] * buf[j][2];
        buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] +
          buf[j][3] * buf[j][3];
        q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
                    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j < grid_points[1]-1; j++) {
        jm1 = j-1;
        jp1 = j+1;

        forcing[i][j][k][0] = forcing[i][j][k][0] -
          ty2*( ue[jp1][2]-ue[jm1][2] )+
          dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

        forcing[i][j][k][1] = forcing[i][j][k][1] -
          ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
          yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
          dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

        forcing[i][j][k][2] = forcing[i][j][k][2] -
          ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
               (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
          yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
          dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

        forcing[i][j][k][3] = forcing[i][j][k][3] -
          ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
          yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
          dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

        forcing[i][j][k][4] = forcing[i][j][k][4] -
          ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
               buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
          0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+
                      buf[jm1][0])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
          dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation in eta
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
        j = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
           4.0*ue[j+1][m] + ue[j+2][m]);
      }

      for (m = 0; m < 5; m++) {
        for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[j-2][m] - 4.0*ue[j-1][m] +
             6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] +
           6.0*ue[j][m] - 4.0*ue[j+1][m]);
        j = grid_points[1]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c zeta-direction flux differences (component 3 is the convective one)
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 1; j < grid_points[1]-1; j++) {
      eta = (double)j * dnym1;

      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;

        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[k][m] = dtemp[m];
        }

        dtpp = 1.0/dtemp[0];

        for (m = 1; m <= 4; m++) {
          buf[k][m] = dtpp * dtemp[m];
        }

        cuf[k]    = buf[k][3] * buf[k][3];
        buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] +
          buf[k][2] * buf[k][2];
        q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
                    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k < grid_points[2]-1; k++) {
        km1 = k-1;
        kp1 = k+1;

        forcing[i][j][k][0] = forcing[i][j][k][0] -
          tz2*( ue[kp1][3]-ue[km1][3] )+
          dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

        forcing[i][j][k][1] = forcing[i][j][k][1] -
          tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
          zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
          dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

        forcing[i][j][k][2] = forcing[i][j][k][2] -
          tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
          zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
          dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

        forcing[i][j][k][3] = forcing[i][j][k][3] -
          tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
                 (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
          zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
          dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

        forcing[i][j][k][4] = forcing[i][j][k][4] -
          tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
                 buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
          0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]
                      +buf[km1][0])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
          dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation in zeta
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
        k = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
           4.0*ue[k+1][m] + ue[k+2][m]);
      }

      for (m = 0; m < 5; m++) {
        for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[k-2][m] - 4.0*ue[k-1][m] +
             6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] +
           6.0*ue[k][m] - 4.0*ue[k+1][m]);
        k = grid_points[2]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c now change the sign of the forcing function
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
c this function returns the exact solution at point xi, eta, zeta:
c a tri-variate polynomial in each of the 5 components, with
c coefficients taken from the file-scope table ce[5][13]
c-------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
                           double dtemp[5]) {
  int m;

  for (m = 0; m < 5; m++) {
    dtemp[m] =  ce[m][0] +
      xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]))) +
      eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11])))+
      zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] +
                                              zeta*ce[m][12])));
  }
}

/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/
static void initialize(void) {

  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 0; i < IMAX; i++) {
    for (j = 0; j < IMAX; j++) {
      for (k = 0; k < IMAX; k++) {
        for (m = 0; m < 5; m++) {
          u[i][j][k][m] = 1.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid:
c Pface[0..1][d][m] holds the exact solution on the two faces of
c direction d; the tri-linear transfinite blend of the three pairs
c gives the initial guess at each interior point.
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,ix,iy,iz,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;

        for (ix = 0; ix < 2; ix++) {
          exact_solution((double)ix, eta, zeta,
                         &(Pface[ix][0][0]));
        }
        for (iy = 0; iy < 2; iy++) {
          exact_solution(xi, (double)iy , zeta,
                         &Pface[iy][1][0]);
        }
        for (iz = 0; iz < 2; iz++) {
          exact_solution(xi, eta, (double)iz,
                         &Pface[iz][2][0]);
        }

        for (m = 0; m < 5; m++) {
          Pxi   = xi   * Pface[1][0][m] +
            (1.0-xi)   * Pface[0][0][m];
          Peta  = eta  * Pface[1][1][m] +
            (1.0-eta)  * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] +
            (1.0-zeta) * Pface[0][2][m];

          u[i][j][k][m] = Pxi + Peta + Pzeta -
            Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
            Pxi*Peta*Pzeta;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c now store the exact values on the boundaries: the six face loops
c below overwrite the interpolated values on each face of the cube.
c "nowait" pairs each face with the opposite face that follows it.
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
  i = 0;
  xi = 0.0;
#pragma omp for private(k,m) nowait
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
  i = grid_points[0]-1;
  xi = 1.0;
#pragma omp for private(k,m)
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
  j = 0;
  eta = 0.0;
#pragma omp for private(k,m) nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
  j = grid_points[1]-1;
  eta = 1.0;
#pragma omp for private(k,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
  k = 0;
  zeta = 0.0;
#pragma omp for private(j,m) nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
  k = grid_points[2]-1;
  zeta = 1.0;
#pragma omp for private(j,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
}

/*--------------------------------------------------------------------
c lhsinit: reset the block-tridiagonal left hand side lhs[][][][3][5][5]
c (sub-, main- and super-diagonal 5x5 blocks) before a new factorization
c-------------------------------------------------------------------*/
static void lhsinit(void) {

  int i, j, k, m, n;

/*--------------------------------------------------------------------
c zero the whole left hand side for starters
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m,n)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          for (n = 0; n < 5; n++) {
            lhs[i][j][k][0][m][n] = 0.0;
            lhs[i][j][k][1][m][n] = 0.0;
            lhs[i][j][k][2][m][n] = 0.0;
          }
        }
      }
    }
  }

/*--------------------------------------------------------------------
c next, set all diagonal values to 1.
This is overkill, but convenient
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          /* identity on the main-diagonal block (index 1) */
          lhs[i][j][k][1][m][m] = 1.0;
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
c This function computes the left hand side in the xi-direction:
c flux Jacobians fjac and viscous Jacobians njac at every point,
c then the three 5x5 blocks (AA = sub-, BB = main-, CC = super-
c diagonal) of the block-tridiagonal system for each interior point.
c-------------------------------------------------------------------*/
static void lhsx(void) {

  int i, j, k;

/*--------------------------------------------------------------------
c determine a (labeled f) and n jacobians
c-------------------------------------------------------------------*/
#pragma omp for private(k,i)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (i = 0; i < grid_points[0]; i++) {

        tmp1 = 1.0 / u[i][j][k][0];   /* 1/rho and its powers */
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

/*--------------------------------------------------------------------
c flux Jacobian A = dF/dU for the xi-direction fluxes
c-------------------------------------------------------------------*/
        fjac[ i][ j][ k][0][0] = 0.0;
        fjac[ i][ j][ k][0][1] = 1.0;
        fjac[ i][ j][ k][0][2] = 0.0;
        fjac[ i][ j][ k][0][3] = 0.0;
        fjac[ i][ j][ k][0][4] = 0.0;

        fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 *
                                   u[i][j][k][1])
          + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
                         + u[i][j][k][2] * u[i][j][k][2]
                         + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][1][1] = ( 2.0 - c2 )
          * ( u[i][j][k][1] / u[i][j][k][0] );
        fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
        fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
        fjac[i][j][k][1][4] = c2;

        fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
        fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][2][3] = 0.0;
        fjac[i][j][k][2][4] = 0.0;

        fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][2] = 0.0;
        fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][3][4] = 0.0;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * ( u[i][j][k][4] * tmp1 ) )
          * ( u[i][j][k][1] * tmp1 );
        fjac[i][j][k][4][1] = c1 *  u[i][j][k][4] * tmp1
          - 0.50 * c2
          * ( 3.0*u[i][j][k][1]*u[i][j][k][1]
              + u[i][j][k][2]*u[i][j][k][2]
              + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] ) * tmp2;
        fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] ) * tmp2;
        fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );

        /* viscous Jacobian N for the xi-direction */
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] =   con43 * c3c4 * tmp1;
        njac[i][j][k][1][2] =   0.0;
        njac[i][j][k][1][3] =   0.0;
        njac[i][j][k][1][4] =   0.0;

        njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] =   0.0;
        njac[i][j][k][2][2] =   c3c4 * tmp1;
        njac[i][j][k][2][3] =   0.0;
        njac[i][j][k][2][4] =   0.0;

        njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] =   0.0;
        njac[i][j][k][3][2] =   0.0;
        njac[i][j][k][3][3] =   c3c4 * tmp1;
        njac[i][j][k][3][4] =   0.0;

        njac[i][j][k][4][0] = - ( con43 * c3c4
                                  - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];

        njac[i][j][k][4][1] = ( con43 * c3c4
                                - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }

/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in x direction
c-------------------------------------------------------------------*/
      for (i = 1; i < grid_points[0]-1; i++) {

        tmp1 = dt * tx1;
        tmp2 = dt * tx2;

        /* AA: sub-diagonal block, built from point i-1 */
        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]
          - tmp1 * njac[i-1][j][k][0][0]
          - tmp1 * dx1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]
          - tmp1 * njac[i-1][j][k][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]
          - tmp1 * njac[i-1][j][k][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]
          - tmp1 * njac[i-1][j][k][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]
          - tmp1 * njac[i-1][j][k][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]
          - tmp1 * njac[i-1][j][k][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]
          - tmp1 * njac[i-1][j][k][1][1]
          - tmp1 * dx2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]
          - tmp1 * njac[i-1][j][k][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]
          - tmp1 * njac[i-1][j][k][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]
          - tmp1 * njac[i-1][j][k][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]
          - tmp1 * njac[i-1][j][k][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]
          - tmp1 * njac[i-1][j][k][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]
          - tmp1 * njac[i-1][j][k][2][2]
          - tmp1 * dx3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]
          - tmp1 * njac[i-1][j][k][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]
          - tmp1 * njac[i-1][j][k][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]
          - tmp1 * njac[i-1][j][k][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]
          - tmp1 * njac[i-1][j][k][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]
          - tmp1 * njac[i-1][j][k][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]
          - tmp1 * njac[i-1][j][k][3][3]
          - tmp1 * dx4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]
          - tmp1 * njac[i-1][j][k][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]
          - tmp1 * njac[i-1][j][k][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]
          - tmp1 * njac[i-1][j][k][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]
          - tmp1 * njac[i-1][j][k][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]
          - tmp1 * njac[i-1][j][k][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]
          - tmp1 * njac[i-1][j][k][4][4]
          - tmp1 * dx5;

        /* BB: main-diagonal block, identity plus viscous terms at i */
        lhs[i][j][k][BB][0][0] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][0][0]
          + tmp1 * 2.0 * dx1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][1][1]
          + tmp1 * 2.0 * dx2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][2][2]
          + tmp1 * 2.0 * dx3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][3][3]
          + tmp1 * 2.0 * dx4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][4][4]
          + tmp1 * 2.0 * dx5;

        /* CC: super-diagonal block, built from point i+1 */
        lhs[i][j][k][CC][0][0] =  tmp2 * fjac[i+1][j][k][0][0]
          - tmp1 * njac[i+1][j][k][0][0]
          - tmp1 * dx1;
        lhs[i][j][k][CC][0][1] =  tmp2 * fjac[i+1][j][k][0][1]
          - tmp1 * njac[i+1][j][k][0][1];
        lhs[i][j][k][CC][0][2] =  tmp2 * fjac[i+1][j][k][0][2]
          - tmp1 * njac[i+1][j][k][0][2];
        lhs[i][j][k][CC][0][3] =  tmp2 * fjac[i+1][j][k][0][3]
          - tmp1 * njac[i+1][j][k][0][3];
        lhs[i][j][k][CC][0][4] =  tmp2 * fjac[i+1][j][k][0][4]
          - tmp1 * njac[i+1][j][k][0][4];

        lhs[i][j][k][CC][1][0] =  tmp2 * fjac[i+1][j][k][1][0]
          - tmp1 * njac[i+1][j][k][1][0];
        lhs[i][j][k][CC][1][1] =  tmp2 * fjac[i+1][j][k][1][1]
          - tmp1 * njac[i+1][j][k][1][1]
          - tmp1 * dx2;
        lhs[i][j][k][CC][1][2] =  tmp2 * fjac[i+1][j][k][1][2]
          - tmp1 * njac[i+1][j][k][1][2];
        lhs[i][j][k][CC][1][3] =  tmp2 * fjac[i+1][j][k][1][3]
          - tmp1 * njac[i+1][j][k][1][3];
        lhs[i][j][k][CC][1][4] =  tmp2 * fjac[i+1][j][k][1][4]
          - tmp1 * njac[i+1][j][k][1][4];

        lhs[i][j][k][CC][2][0] =  tmp2 * fjac[i+1][j][k][2][0]
          - tmp1 * njac[i+1][j][k][2][0];
        lhs[i][j][k][CC][2][1] =  tmp2 * fjac[i+1][j][k][2][1]
          - tmp1 * njac[i+1][j][k][2][1];
        lhs[i][j][k][CC][2][2] =  tmp2 * fjac[i+1][j][k][2][2]
          - tmp1 * njac[i+1][j][k][2][2]
          - tmp1 * dx3;
        lhs[i][j][k][CC][2][3] =  tmp2 * fjac[i+1][j][k][2][3]
          - tmp1 * njac[i+1][j][k][2][3];
        lhs[i][j][k][CC][2][4] =  tmp2 * fjac[i+1][j][k][2][4]
          - tmp1 * njac[i+1][j][k][2][4];

        lhs[i][j][k][CC][3][0] =  tmp2 * fjac[i+1][j][k][3][0]
          - tmp1 * njac[i+1][j][k][3][0];
        lhs[i][j][k][CC][3][1] =  tmp2 * fjac[i+1][j][k][3][1]
          - tmp1 * njac[i+1][j][k][3][1];
        lhs[i][j][k][CC][3][2] =  tmp2 * fjac[i+1][j][k][3][2]
          - tmp1 * njac[i+1][j][k][3][2];
        lhs[i][j][k][CC][3][3] =  tmp2 * fjac[i+1][j][k][3][3]
          - tmp1 * njac[i+1][j][k][3][3]
          - tmp1 * dx4;
        lhs[i][j][k][CC][3][4] =  tmp2 * fjac[i+1][j][k][3][4]
          - tmp1 * njac[i+1][j][k][3][4];

        lhs[i][j][k][CC][4][0] =  tmp2 * fjac[i+1][j][k][4][0]
          - tmp1 * njac[i+1][j][k][4][0];
        lhs[i][j][k][CC][4][1] =  tmp2 * fjac[i+1][j][k][4][1]
          - tmp1 * njac[i+1][j][k][4][1];
        lhs[i][j][k][CC][4][2] =  tmp2 * fjac[i+1][j][k][4][2]
          - tmp1 * njac[i+1][j][k][4][2];
        lhs[i][j][k][CC][4][3] =  tmp2 * fjac[i+1][j][k][4][3]
          - tmp1 * njac[i+1][j][k][4][3];
        lhs[i][j][k][CC][4][4] =  tmp2 * fjac[i+1][j][k][4][4]
          - tmp1 * njac[i+1][j][k][4][4]
          - tmp1 * dx5;
      }
    }
  }
}

/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors:
c same block-tridiagonal structure as lhsx, with the eta-direction
c flux/viscous Jacobians and the j-1 / j / j+1 neighbors.
c-------------------------------------------------------------------*/
static void lhsy(void) {

  int i, j, k;

/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        fjac[ i][ j][ k][0][0] = 0.0;
        fjac[ i][ j][ k][0][1] = 0.0;
        fjac[ i][ j][ k][0][2] = 1.0;
        fjac[ i][ j][ k][0][3] = 0.0;
        fjac[ i][ j][ k][0][4] = 0.0;

        fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
        fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][1][3] = 0.0;
        fjac[i][j][k][1][4] = 0.0;

        fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
          + 0.50 * c2 * ( (  u[i][j][k][1] * u[i][j][k][1]
                             + u[i][j][k][2] * u[i][j][k][2]
                             + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][2][1] = - c2 *  u[i][j][k][1] * tmp1;
fjac[i][j][k][2][2] = ( 2.0 - c2 ) * u[i][j][k][2] * tmp1; fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1; fjac[i][j][k][2][4] = c2; fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][3][1] = 0.0; fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1; fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1; fjac[i][j][k][3][4] = 0.0; fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 - c1 * u[i][j][k][4] * tmp1 ) * u[i][j][k][2] * tmp1; fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2] * tmp2; fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1] + 3.0 * u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * tmp2 ); fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1; njac[i][j][k][0][0] = 0.0; njac[i][j][k][0][1] = 0.0; njac[i][j][k][0][2] = 0.0; njac[i][j][k][0][3] = 0.0; njac[i][j][k][0][4] = 0.0; njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1]; njac[i][j][k][1][1] = c3c4 * tmp1; njac[i][j][k][1][2] = 0.0; njac[i][j][k][1][3] = 0.0; njac[i][j][k][1][4] = 0.0; njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2]; njac[i][j][k][2][1] = 0.0; njac[i][j][k][2][2] = con43 * c3c4 * tmp1; njac[i][j][k][2][3] = 0.0; njac[i][j][k][2][4] = 0.0; njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3]; njac[i][j][k][3][1] = 0.0; njac[i][j][k][3][2] = 0.0; njac[i][j][k][3][3] = c3c4 * tmp1; njac[i][j][k][3][4] = 0.0; njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1])) - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3])) - c1345 * tmp2 * u[i][j][k][4]; njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1]; njac[i][j][k][4][2] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][2]; njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3]; njac[i][j][k][4][4] = ( c1345 ) * tmp1; } } } 
/*-------------------------------------------------------------------- c now joacobians set, so form left hand side in y direction c-------------------------------------------------------------------*/ #pragma omp for private(j,k) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { tmp1 = dt * ty1; tmp2 = dt * ty2; lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0] - tmp1 * njac[i][j-1][k][0][0] - tmp1 * dy1; lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1] - tmp1 * njac[i][j-1][k][0][1]; lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2] - tmp1 * njac[i][j-1][k][0][2]; lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3] - tmp1 * njac[i][j-1][k][0][3]; lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4] - tmp1 * njac[i][j-1][k][0][4]; lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0] - tmp1 * njac[i][j-1][k][1][0]; lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1] - tmp1 * njac[i][j-1][k][1][1] - tmp1 * dy2; lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2] - tmp1 * njac[i][j-1][k][1][2]; lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3] - tmp1 * njac[i][j-1][k][1][3]; lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4] - tmp1 * njac[i][j-1][k][1][4]; lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0] - tmp1 * njac[i][j-1][k][2][0]; lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1] - tmp1 * njac[i][j-1][k][2][1]; lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2] - tmp1 * njac[i][j-1][k][2][2] - tmp1 * dy3; lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3] - tmp1 * njac[i][j-1][k][2][3]; lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4] - tmp1 * njac[i][j-1][k][2][4]; lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0] - tmp1 * njac[i][j-1][k][3][0]; lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1] - tmp1 * njac[i][j-1][k][3][1]; lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2] - tmp1 * 
njac[i][j-1][k][3][2]; lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3] - tmp1 * njac[i][j-1][k][3][3] - tmp1 * dy4; lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4] - tmp1 * njac[i][j-1][k][3][4]; lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0] - tmp1 * njac[i][j-1][k][4][0]; lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1] - tmp1 * njac[i][j-1][k][4][1]; lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2] - tmp1 * njac[i][j-1][k][4][2]; lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3] - tmp1 * njac[i][j-1][k][4][3]; lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4] - tmp1 * njac[i][j-1][k][4][4] - tmp1 * dy5; lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dy1; lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1]; lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2]; lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3]; lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4]; lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0]; lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dy2; lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2]; lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3]; lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4]; lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0]; lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1]; lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dy3; lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3]; lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4]; lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0]; lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1]; lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2]; lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dy4; lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4]; lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * 
njac[i][j][k][4][0]; lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1]; lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2]; lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3]; lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dy5; lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0] - tmp1 * njac[i][j+1][k][0][0] - tmp1 * dy1; lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1] - tmp1 * njac[i][j+1][k][0][1]; lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2] - tmp1 * njac[i][j+1][k][0][2]; lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3] - tmp1 * njac[i][j+1][k][0][3]; lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4] - tmp1 * njac[i][j+1][k][0][4]; lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0] - tmp1 * njac[i][j+1][k][1][0]; lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1] - tmp1 * njac[i][j+1][k][1][1] - tmp1 * dy2; lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2] - tmp1 * njac[i][j+1][k][1][2]; lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3] - tmp1 * njac[i][j+1][k][1][3]; lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4] - tmp1 * njac[i][j+1][k][1][4]; lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0] - tmp1 * njac[i][j+1][k][2][0]; lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1] - tmp1 * njac[i][j+1][k][2][1]; lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2] - tmp1 * njac[i][j+1][k][2][2] - tmp1 * dy3; lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3] - tmp1 * njac[i][j+1][k][2][3]; lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4] - tmp1 * njac[i][j+1][k][2][4]; lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0] - tmp1 * njac[i][j+1][k][3][0]; lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1] - tmp1 * njac[i][j+1][k][3][1]; lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2] - tmp1 * njac[i][j+1][k][3][2]; lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3] - tmp1 * njac[i][j+1][k][3][3] - tmp1 * dy4; lhs[i][j][k][CC][3][4] = tmp2 * 
fjac[i][j+1][k][3][4] - tmp1 * njac[i][j+1][k][3][4]; /* completes lhs[i][j][k][CC][3][4], whose left-hand side is on the previous line (tail of lhsy()) */
        /* tail of lhsy(): row 4 of the CC (j+1 coupling) block, then close the i/j/k loops */
        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0] - tmp1 * njac[i][j+1][k][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1] - tmp1 * njac[i][j+1][k][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2] - tmp1 * njac[i][j+1][k][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3] - tmp1 * njac[i][j+1][k][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4] - tmp1 * njac[i][j+1][k][4][4] - tmp1 * dy5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     This function computes the left hand side for the three z-factors:
c     for every interior (i,j) line it builds, at each k, the three 5x5
c     blocks AA (k-1 coupling), BB (diagonal) and CC (k+1 coupling) of the
c     block-tridiagonal system, from the flux Jacobian fjac and the
c     viscous Jacobian njac.
c     NOTE(review): fjac, njac, lhs, u, grid_points, dt, tz1, tz2, dz1..dz5
c     and tmp1/tmp2/tmp3 are file-scope globals defined elsewhere; the bare
c     "#pragma omp for" directives assume an enclosing "omp parallel"
c     region at the call site -- confirm.
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c     Compute the indices for storing the block-diagonal matrix;
c     determine c (labeled f) and s jacobians.
c     k runs over the WHOLE grid (0..grid_points[2]-1) here because the
c     second loop below reads fjac/njac at k-1 and k+1.
c---------------------------------------------------------------------*/
#pragma omp for private(j,k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 0; k < grid_points[2]; k++) {

        /* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        /* inviscid (flux) Jacobian with respect to the z-direction */
        fjac[i][j][k][0][0] = 0.0;
        fjac[i][j][k][0][1] = 0.0;
        fjac[i][j][k][0][2] = 0.0;
        fjac[i][j][k][0][3] = 1.0;
        fjac[i][j][k][0][4] = 0.0;

        fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][1][2] = 0.0;
        fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][1][4] = 0.0;

        fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][2][1] = 0.0;
        fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][4] = 0.0;

        fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
          + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
                            + u[i][j][k][2] * u[i][j][k][2]
                            + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
        fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
        fjac[i][j][k][3][3] = ( 2.0 - c2 ) * u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][4] = c2;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * ( u[i][j][k][4] * tmp1 ) )
          * ( u[i][j][k][3] * tmp1 );
        fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
          - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1]
                            + u[i][j][k][2]*u[i][j][k][2]
                            + 3.0*u[i][j][k][3]*u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;

        /* viscous Jacobian (z-direction); con43 weights the normal component */
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] = c3c4 * tmp1;
        njac[i][j][k][1][2] = 0.0;
        njac[i][j][k][1][3] = 0.0;
        njac[i][j][k][1][4] = 0.0;

        njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] = 0.0;
        njac[i][j][k][2][2] = c3c4 * tmp1;
        njac[i][j][k][2][3] = 0.0;
        njac[i][j][k][2][4] = 0.0;

        njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] = 0.0;
        njac[i][j][k][3][2] = 0.0;
        njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1; /* c3*c4 == c3c4; written out here in the original */
        njac[i][j][k][3][4] = 0.0;

        njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 )* tmp1;
      }
    }
  }

/*--------------------------------------------------------------------
c     now jacobians set, so form left hand side in z direction:
c     AA = -dt*tz2*fjac(k-1) - dt*tz1*njac(k-1) (minus dz* damping on the
c     diagonal), BB = I + 2*dt*tz1*njac(k) (plus dz* damping on the
c     diagonal), CC = dt*tz2*fjac(k+1) - dt*tz1*njac(k+1) (minus dz*
c     damping on the diagonal).
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

        tmp1 = dt * tz1;
        tmp2 = dt * tz2;

        /* AA: coupling to plane k-1 */
        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0] - tmp1 * njac[i][j][k-1][0][0] - tmp1 * dz1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1] - tmp1 * njac[i][j][k-1][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2] - tmp1 * njac[i][j][k-1][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3] - tmp1 * njac[i][j][k-1][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4] - tmp1 * njac[i][j][k-1][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0] - tmp1 * njac[i][j][k-1][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1] - tmp1 * njac[i][j][k-1][1][1] - tmp1 * dz2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2] - tmp1 * njac[i][j][k-1][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3] - tmp1 * njac[i][j][k-1][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4] - tmp1 * njac[i][j][k-1][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0] - tmp1 * njac[i][j][k-1][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1] - tmp1 * njac[i][j][k-1][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2] - tmp1 * njac[i][j][k-1][2][2] - tmp1 * dz3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3] - tmp1 * njac[i][j][k-1][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4] - tmp1 * njac[i][j][k-1][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0] - tmp1 * njac[i][j][k-1][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1] - tmp1 * njac[i][j][k-1][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2] - tmp1 * njac[i][j][k-1][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3] - tmp1 * njac[i][j][k-1][3][3] - tmp1 * dz4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4] - tmp1 * njac[i][j][k-1][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0] - tmp1 * njac[i][j][k-1][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1] - tmp1 * njac[i][j][k-1][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2] - tmp1 * njac[i][j][k-1][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3] - tmp1 * njac[i][j][k-1][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4] - tmp1 * njac[i][j][k-1][4][4] - tmp1 * dz5;

        /* BB: diagonal block, identity plus viscous/damping terms */
        lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5;

        /* CC: coupling to plane k+1 */
        lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0] - tmp1 * njac[i][j][k+1][0][0] - tmp1 * dz1;
        lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1] - tmp1 * njac[i][j][k+1][0][1];
        lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2] - tmp1 * njac[i][j][k+1][0][2];
        lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3] - tmp1 * njac[i][j][k+1][0][3];
        lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4] - tmp1 * njac[i][j][k+1][0][4];

        lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0] - tmp1 * njac[i][j][k+1][1][0];
        lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1] - tmp1 * njac[i][j][k+1][1][1] - tmp1 * dz2;
        lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2] - tmp1 * njac[i][j][k+1][1][2];
        lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3] - tmp1 * njac[i][j][k+1][1][3];
        lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4] - tmp1 * njac[i][j][k+1][1][4];

        lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0] - tmp1 * njac[i][j][k+1][2][0];
        lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1] - tmp1 * njac[i][j][k+1][2][1];
        lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2] - tmp1 * njac[i][j][k+1][2][2] - tmp1 * dz3;
        lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3] - tmp1 * njac[i][j][k+1][2][3];
        lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4] - tmp1 * njac[i][j][k+1][2][4];

        lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0] - tmp1 * njac[i][j][k+1][3][0];
        lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1] - tmp1 * njac[i][j][k+1][3][1];
        lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2] - tmp1 * njac[i][j][k+1][3][2];
        lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3] - tmp1 * njac[i][j][k+1][3][3] - tmp1 * dz4;
        lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4] - tmp1 * njac[i][j][k+1][3][4];

        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0] - tmp1 * njac[i][j][k+1][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1] - tmp1 * njac[i][j][k+1][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2] - tmp1 * njac[i][j][k+1][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3] - tmp1 * njac[i][j][k+1][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4] - tmp1 * njac[i][j][k+1][4][4] - tmp1 * dz5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {
/* Builds the complete right hand side: forcing term plus xi/eta/zeta
   fluxes and fourth-order artificial dissipation, finally scaled by dt.
   NOTE(review): u, rhs, forcing, rho_i, us, vs, ws, square, qs and the
   d*/t*/c* coefficient globals are file-scope; bare "#pragma omp for"
   assumes an enclosing parallel region -- confirm at call site. */

  int i, j, k, m;
  double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;

/*--------------------------------------------------------------------
c     compute the reciprocal of density, and the kinetic energy,
c     and the speed of sound.
c-------------------------------------------------------------------*/
#pragma omp for private(j,k) nowait
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        rho_inv = 1.0/u[i][j][k][0];
        rho_i[i][j][k] = rho_inv;
        us[i][j][k] = u[i][j][k][1] * rho_inv;
        vs[i][j][k] = u[i][j][k][2] * rho_inv;
        ws[i][j][k] = u[i][j][k][3] * rho_inv;
        square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1]
                                 + u[i][j][k][2]*u[i][j][k][2]
                                 + u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;
        qs[i][j][k] = square[i][j][k] * rho_inv;
      }
    }
  }

/*--------------------------------------------------------------------
c     copy the exact forcing term to the right hand side;  because
c     this forcing term is known, we can store it on the whole grid
c     including the boundary
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++)
{ /* interior of compute_rhs(): the rhs = forcing copy begun on the previous line */
        for (m = 0; m < 5; m++) {
          rhs[i][j][k][m] = forcing[i][j][k][m];
        }
      }
    }
  }

/*--------------------------------------------------------------------
c     compute xi-direction fluxes (second-order central differences in i)
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        uijk = us[i][j][k];
        up1  = us[i+1][j][k];
        um1  = us[i-1][j][k];

        rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 *
          (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + u[i-1][j][k][0]) -
          tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);

        rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 *
          (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + u[i-1][j][k][1]) +
          xxcon2*con43 * (up1 - 2.0*uijk + um1) -
          tx2 * (u[i+1][j][k][1]*up1 - u[i-1][j][k][1]*um1 +
                 (u[i+1][j][k][4]- square[i+1][j][k]-
                  u[i-1][j][k][4]+ square[i-1][j][k])* c2);

        rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 *
          (u[i+1][j][k][2] - 2.0*u[i][j][k][2] + u[i-1][j][k][2]) +
          xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] + vs[i-1][j][k]) -
          tx2 * (u[i+1][j][k][2]*up1 - u[i-1][j][k][2]*um1);

        rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 *
          (u[i+1][j][k][3] - 2.0*u[i][j][k][3] + u[i-1][j][k][3]) +
          xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] + ws[i-1][j][k]) -
          tx2 * (u[i+1][j][k][3]*up1 - u[i-1][j][k][3]*um1);

        rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 *
          (u[i+1][j][k][4] - 2.0*u[i][j][k][4] + u[i-1][j][k][4]) +
          xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] + qs[i-1][j][k]) +
          xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) +
          xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] -
                    2.0*u[i][j][k][4]*rho_i[i][j][k] +
                    u[i-1][j][k][4]*rho_i[i-1][j][k]) -
          tx2 * ( (c1*u[i+1][j][k][4] - c2*square[i+1][j][k])*up1 -
                  (c1*u[i-1][j][k][4] - c2*square[i-1][j][k])*um1 );
      }
    }
  }

/*--------------------------------------------------------------------
c     add fourth order xi-direction dissipation
c     (one-sided stencils at i = 1, 2 and at the two planes nearest the
c     far boundary; the full 5-point stencil in between)
c-------------------------------------------------------------------*/
  i = 1;
#pragma omp for private(k,m) nowait
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
          ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);
      }
    }
  }

  i = 2;
#pragma omp for private(k,m) nowait
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m]
           - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);
      }
    }
  }

#pragma omp for private(j,k,m) nowait
  for (i = 3; i < grid_points[0]-3; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
            ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
              6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
              u[i+2][j][k][m] );
        }
      }
    }
  }

  i = grid_points[0]-3;
#pragma omp for private(k,m) nowait
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
            6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );
      }
    }
  }

  i = grid_points[0]-2;
#pragma omp for private(k,m)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +
            5.0*u[i][j][k][m] );
      }
    }
  }

/*--------------------------------------------------------------------
c     compute eta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        vijk = vs[i][j][k];
        vp1  = vs[i][j+1][k];
        vm1  = vs[i][j-1][k];

        rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 *
          (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + u[i][j-1][k][0]) -
          ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);

        rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 *
          (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + u[i][j-1][k][1]) +
          yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + us[i][j-1][k]) -
          ty2 * (u[i][j+1][k][1]*vp1 - u[i][j-1][k][1]*vm1);

        rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 *
          (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + u[i][j-1][k][2]) +
          yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
          ty2 * (u[i][j+1][k][2]*vp1 - u[i][j-1][k][2]*vm1 +
                 (u[i][j+1][k][4] - square[i][j+1][k] -
                  u[i][j-1][k][4] + square[i][j-1][k]) *c2);

        rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 *
          (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + u[i][j-1][k][3]) +
          yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + ws[i][j-1][k]) -
          ty2 * (u[i][j+1][k][3]*vp1 - u[i][j-1][k][3]*vm1);

        rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 *
          (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + u[i][j-1][k][4]) +
          yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + qs[i][j-1][k]) +
          yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) +
          yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] -
                    2.0*u[i][j][k][4]*rho_i[i][j][k] +
                    u[i][j-1][k][4]*rho_i[i][j-1][k]) -
          ty2 * ((c1*u[i][j+1][k][4] - c2*square[i][j+1][k]) * vp1 -
                 (c1*u[i][j-1][k][4] - c2*square[i][j-1][k]) * vm1);
      }
    }
  }

/*--------------------------------------------------------------------
c     add fourth order eta-direction dissipation
c-------------------------------------------------------------------*/
  j = 1;
#pragma omp for private(k,m) nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
          ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);
      }
    }
  }

  j = 2;
#pragma omp for private(k,m) nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m]
           - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);
      }
    }
  }

#pragma omp for private(j,k,m) nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 3; j < grid_points[1]-3; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
            ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
              6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
              u[i][j+2][k][m] );
        }
      }
    }
  }

  j = grid_points[1]-3;
#pragma omp for private(k,m) nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
            6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );
      }
    }
  }

  j = grid_points[1]-2;
#pragma omp for private(k,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +
            5.*u[i][j][k][m] );
      }
    }
  }

/*--------------------------------------------------------------------
c     compute zeta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        wijk = ws[i][j][k];
        wp1  = ws[i][j][k+1];
        wm1  = ws[i][j][k-1];

        rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 *
          (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + u[i][j][k-1][0]) -
          tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);

        rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 *
          (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + u[i][j][k-1][1]) +
          zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + us[i][j][k-1]) -
          tz2 * (u[i][j][k+1][1]*wp1 - u[i][j][k-1][1]*wm1);

        rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 *
          (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + u[i][j][k-1][2]) +
          zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + vs[i][j][k-1]) -
          tz2 * (u[i][j][k+1][2]*wp1 - u[i][j][k-1][2]*wm1);

        rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 *
          (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + u[i][j][k-1][3]) +
          zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
          tz2 * (u[i][j][k+1][3]*wp1 - u[i][j][k-1][3]*wm1 +
                 (u[i][j][k+1][4] - square[i][j][k+1] -
                  u[i][j][k-1][4] + square[i][j][k-1]) *c2);

        rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 *
          (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + u[i][j][k-1][4]) +
          zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + qs[i][j][k-1]) +
          zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) +
          zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] -
                    2.0*u[i][j][k][4]*rho_i[i][j][k] +
                    u[i][j][k-1][4]*rho_i[i][j][k-1]) -
          tz2 * ( (c1*u[i][j][k+1][4] - c2*square[i][j][k+1])*wp1 -
                  (c1*u[i][j][k-1][4] - c2*square[i][j][k-1])*wm1);
      }
    }
  }

/*--------------------------------------------------------------------
c     add fourth order zeta-direction dissipation
c-------------------------------------------------------------------*/
  k = 1;
#pragma omp for private(j,m) nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
          ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);
      }
    }
  }

  k = 2;
#pragma omp for private(j,m) nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m]
           - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);
      }
    }
  }

#pragma omp for private(j,k,m) nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 3; k < grid_points[2]-3; k++) {
        for (m = 0; m < 5; m++) {
          rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
            ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
              6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
              u[i][j][k+2][m] );
        }
      }
    }
  }

  k = grid_points[2]-3;
#pragma omp for private(j,m) nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
            6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );
      }
    }
  }

  k = grid_points[2]-2;
#pragma omp for private(j,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (m = 0; m < 5; m++) {
        rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
          ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
            5.0*u[i][j][k][m] );
      }
    }
  }

  /* finally scale the whole interior right hand side by the time step */
#pragma omp for private(k,m,i)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
        for (i = 1; i < grid_points[0]-1; i++) {
          rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Initializes every file-scope coefficient used by the solver: the ce
   table of exact-solution coefficients, the gas constants c1..c5, the
   grid spacings, the diffusion/dissipation constants and all derived
   products.  Must run before any lhs*/rhs kernel. */

  ce[0][0]  = 2.0;
  ce[0][1]  = 0.0;
  ce[0][2]  = 0.0;
  ce[0][3]  = 4.0;
  ce[0][4]  = 5.0;
  ce[0][5]  = 3.0;
  ce[0][6]  = 0.5;
  ce[0][7]  = 0.02;
  ce[0][8]  = 0.01;
  ce[0][9]  = 0.03;
  ce[0][10] = 0.5;
  ce[0][11] = 0.4;
  ce[0][12] = 0.3;

  ce[1][0]  = 1.0;
  ce[1][1]  = 0.0;
  ce[1][2]  = 0.0;
  ce[1][3]  = 0.0;
  ce[1][4]  = 1.0;
  ce[1][5]  = 2.0;
  ce[1][6]  = 3.0;
  ce[1][7]  = 0.01;
  ce[1][8]  = 0.03;
  ce[1][9]  = 0.02;
  ce[1][10] = 0.4;
  ce[1][11] = 0.3;
  ce[1][12] = 0.5;

  ce[2][0]  = 2.0;
  ce[2][1]  = 2.0;
  ce[2][2]  = 0.0;
  ce[2][3]  = 0.0;
  ce[2][4]  = 0.0;
  ce[2][5]  = 2.0;
  ce[2][6]  = 3.0;
  ce[2][7]  = 0.04;
  ce[2][8]  = 0.03;
  ce[2][9]  = 0.05;
  ce[2][10] = 0.3;
  ce[2][11] = 0.5;
  ce[2][12] = 0.4;

  ce[3][0]  = 2.0;
  ce[3][1]  = 2.0;
  ce[3][2]  = 0.0;
  ce[3][3]  = 0.0;
  ce[3][4]  = 0.0;
  ce[3][5]  = 2.0;
  ce[3][6]  = 3.0;
  ce[3][7]  = 0.03;
  ce[3][8]  = 0.05;
  ce[3][9]  = 0.04;
  ce[3][10] = 0.2;
  ce[3][11] = 0.1;
  ce[3][12] = 0.3;

  ce[4][0]  = 5.0;
  ce[4][1]  = 4.0;
  ce[4][2]  = 3.0;
  ce[4][3]  = 2.0;
  ce[4][4]  = 0.1;
  ce[4][5]  = 0.4;
  ce[4][6]  = 0.3;
  ce[4][7]  = 0.05;
  ce[4][8]  = 0.04;
  ce[4][9]  = 0.03;
  ce[4][10] = 0.1;
  ce[4][11] = 0.3;
  ce[4][12] = 0.2;

  c1 = 1.4;
  c2 = 0.4;
  c3 = 0.1;
  c4 = 1.0;
  c5 = 1.4;

  /* reciprocal mesh widths */
  dnxm1 = 1.0 / (double)(grid_points[0]-1);
  dnym1 = 1.0 / (double)(grid_points[1]-1);
  dnzm1 = 1.0 / (double)(grid_points[2]-1);

  c1c2 = c1 * c2;
  c1c5 = c1 * c5;
  c3c4 = c3 * c4;
  c1345 = c1c5 * c3c4;

  conz1 = (1.0-c1c5);

  tx1 = 1.0 / (dnxm1 * dnxm1);
  tx2 = 1.0 / (2.0 *
dnxm1); /* completes tx2 = 1.0 / (2.0 * dnxm1), begun on the previous line */
  tx3 = 1.0 / dnxm1;

  ty1 = 1.0 / (dnym1 * dnym1);
  ty2 = 1.0 / (2.0 * dnym1);
  ty3 = 1.0 / dnym1;

  tz1 = 1.0 / (dnzm1 * dnzm1);
  tz2 = 1.0 / (2.0 * dnzm1);
  tz3 = 1.0 / dnzm1;

  /* per-direction damping coefficients */
  dx1 = 0.75;
  dx2 = 0.75;
  dx3 = 0.75;
  dx4 = 0.75;
  dx5 = 0.75;

  dy1 = 0.75;
  dy2 = 0.75;
  dy3 = 0.75;
  dy4 = 0.75;
  dy5 = 0.75;

  dz1 = 1.0;
  dz2 = 1.0;
  dz3 = 1.0;
  dz4 = 1.0;
  dz5 = 1.0;

  dxmax = max(dx3, dx4);
  dymax = max(dy2, dy4);
  dzmax = max(dz2, dz3);

  /* fourth-order artificial dissipation coefficient */
  dssp = 0.25 * max(dx1, max(dy1, dz1) );

  c4dssp = 4.0 * dssp;
  c5dssp = 5.0 * dssp;

  dttx1 = dt*tx1;
  dttx2 = dt*tx2;
  dtty1 = dt*ty1;
  dtty2 = dt*ty2;
  dttz1 = dt*tz1;
  dttz2 = dt*tz2;

  c2dttx1 = 2.0*dttx1;
  c2dtty1 = 2.0*dtty1;
  c2dttz1 = 2.0*dttz1;

  dtdssp = dt*dssp;

  comz1 = dtdssp;
  comz4 = 4.0*dtdssp;
  comz5 = 5.0*dtdssp;
  comz6 = 6.0*dtdssp;

  c3c4tx3 = c3c4*tx3;
  c3c4ty3 = c3c4*ty3;
  c3c4tz3 = c3c4*tz3;

  dx1tx1 = dx1*tx1;
  dx2tx1 = dx2*tx1;
  dx3tx1 = dx3*tx1;
  dx4tx1 = dx4*tx1;
  dx5tx1 = dx5*tx1;

  dy1ty1 = dy1*ty1;
  dy2ty1 = dy2*ty1;
  dy3ty1 = dy3*ty1;
  dy4ty1 = dy4*ty1;
  dy5ty1 = dy5*ty1;

  dz1tz1 = dz1*tz1;
  dz2tz1 = dz2*tz1;
  dz3tz1 = dz3*tz1;
  dz4tz1 = dz4*tz1;
  dz5tz1 = dz5*tz1;

  c2iv  = 2.5;
  con43 = 4.0/3.0;
  con16 = 1.0/6.0;

  xxcon1 = c3c4tx3*con43*tx3;
  xxcon2 = c3c4tx3*tx3;
  xxcon3 = c3c4tx3*conz1*tx3;
  xxcon4 = c3c4tx3*con16*tx3;
  xxcon5 = c3c4tx3*c1c5*tx3;

  yycon1 = c3c4ty3*con43*ty3;
  yycon2 = c3c4ty3*ty3;
  yycon3 = c3c4ty3*conz1*ty3;
  yycon4 = c3c4ty3*con16*ty3;
  yycon5 = c3c4ty3*c1c5*ty3;

  zzcon1 = c3c4tz3*con43*tz3;
  zzcon2 = c3c4tz3*tz3;
  zzcon3 = c3c4tz3*conz1*tz3;
  zzcon4 = c3c4tz3*con16*tz3;
  zzcon5 = c3c4tz3*c1c5*tz3;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     verification routine: compares the computed error and residual
c     RMS-norms against stored reference values for the known problem
c     classes (S, W, A, B, C).  On return *cclass is the matched class
c     (or 'U' for unknown) and *verified says whether all norms agreed
c     within epsilon.  Calls error_norm/compute_rhs/rhs_norm.
c-------------------------------------------------------------------*/

  double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
    epsilon, xce[5], xcr[5], dtref;
  int m;

/*--------------------------------------------------------------------
c     tolerance level
c-------------------------------------------------------------------*/
  epsilon = 1.0e-08;

/*--------------------------------------------------------------------
c     compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
  error_norm(xce);
  compute_rhs();
  rhs_norm(xcr);

  for (m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }

  *cclass = 'U';
  *verified = TRUE;
  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }

/*--------------------------------------------------------------------
c     reference data for 12X12X12 grids, with DT = 1.0d-02
c     NOTE(review): the original comment said "after 100 time steps" but
c     the code checks no_time_steps == 60 -- the code is authoritative.
c-------------------------------------------------------------------*/
  if (grid_points[0] == 12 &&
      grid_points[1] == 12 &&
      grid_points[2] == 12 &&
      no_time_steps == 60) {

    *cclass = 'S';
    dtref = 1.0e-2;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.7034283709541311e-01;
    xcrref[1] = 1.2975252070034097e-02;
    xcrref[2] = 3.2527926989486055e-02;
    xcrref[3] = 2.6436421275166801e-02;
    xcrref[4] = 1.9211784131744430e-01;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 4.9976913345811579e-04;
    xceref[1] = 4.5195666782961927e-05;
    xceref[2] = 7.3973765172921357e-05;
    xceref[3] = 7.3821238632439731e-05;
    xceref[4] = 8.9269630987491446e-04;

/*--------------------------------------------------------------------
c     reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 24 &&
             grid_points[1] == 24 &&
             grid_points[2] == 24 &&
             no_time_steps == 200) {

    *cclass = 'W';
    dtref = 0.8e-3;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 0.1125590409344e+03;
    xcrref[1] = 0.1180007595731e+02;
    xcrref[2] = 0.2710329767846e+02;
    xcrref[3] = 0.2469174937669e+02;
    xcrref[4] = 0.2638427874317e+03;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 0.4419655736008e+01;
    xceref[1] = 0.4638531260002e+00;
    xceref[2] = 0.1011551749967e+01;
    xceref[3] = 0.9235878729944e+00;
    xceref[4] = 0.1018045837718e+02;

/*--------------------------------------------------------------------
c     reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 64 &&
             grid_points[1] == 64 &&
             grid_points[2] == 64 &&
             no_time_steps == 200) {

    *cclass = 'A';
    dtref = 0.8e-3;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.0806346714637264e+02;
    xcrref[1] = 1.1319730901220813e+01;
    xcrref[2] = 2.5974354511582465e+01;
    xcrref[3] = 2.3665622544678910e+01;
    xcrref[4] = 2.5278963211748344e+02;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 4.2348416040525025e+00;
    xceref[1] = 4.4390282496995698e-01;
    xceref[2] = 9.6692480136345650e-01;
    xceref[3] = 8.8302063039765474e-01;
    xceref[4] = 9.7379901770829278e+00;

/*--------------------------------------------------------------------
c     reference data for 102X102X102 grids after 200 time steps,
c     with DT = 3.0d-04
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 102 &&
             grid_points[1] == 102 &&
             grid_points[2] == 102 &&
             no_time_steps == 200) {

    *cclass = 'B';
    dtref = 3.0e-4;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.4233597229287254e+03;
    xcrref[1] = 9.9330522590150238e+01;
    xcrref[2] = 3.5646025644535285e+02;
    xcrref[3] = 3.2485447959084092e+02;
    xcrref[4] = 3.2707541254659363e+03;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 5.2969847140936856e+01;
    xceref[1] = 4.4632896115670668e+00;
    xceref[2] = 1.3122573342210174e+01;
    xceref[3] = 1.2006925323559144e+01;
    xceref[4] = 1.2459576151035986e+02;

/*--------------------------------------------------------------------
c     reference data for 162X162X162 grids after 200 time steps,
c     with DT = 1.0d-04
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 162 &&
             grid_points[1] == 162 &&
             grid_points[2] == 162 &&
             no_time_steps == 200) {

    *cclass = 'C';
    dtref = 1.0e-4;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 0.62398116551764615e+04;
    xcrref[1] = 0.50793239190423964e+03;
    xcrref[2] = 0.15423530093013596e+04;
    xcrref[3] = 0.13302387929291190e+04;
    xcrref[4] = 0.11604087428436455e+05;

/*--------------------------------------------------------------------
c     Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 0.16462008369091265e+03;
    xceref[1] = 0.11497107903824313e+02;
    xceref[2] = 0.41207446207461508e+02;
    xceref[3] = 0.37087651059694167e+02;
    xceref[4] = 0.36211053051841265e+03;
  } else {
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c     verification test for residuals if gridsize is either 12X12X12 or
c     64X64X64 or 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     Compute the difference of solution values and the known reference values.
c-------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }

/*--------------------------------------------------------------------
c     Output the comparison of computed results to known cases.
c-------------------------------------------------------------------*/
  if (*cclass != 'U') {
    printf(" Verification being performed for class %1c\n", *cclass);
    printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *cclass = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown class\n");
  }

  if (*cclass != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }

  for (m = 0; m < 5; m++) {
    if (*cclass == 'U') {
      printf(" %2d%20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
             m, xcr[m], xcrref[m], xcrdif[m]);
    } else {
      printf(" %2d%20.13e%20.13e%20.13e\n",
             m, xcr[m], xcrref[m], xcrdif[m]);
    }
  }

  if (*cclass != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }

  for (m = 0; m < 5; m++) {
    if (*cclass == 'U') {
      printf(" %2d%20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
             m, xce[m], xceref[m], xcedif[m]);
    } else {
      printf(" %2d%20.13e%20.13e%20.13e\n",
             m, xce[m], xceref[m], xcedif[m]);
    }
  }

  if (*cclass == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified == TRUE) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c c Performs line solves in X direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix, c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. c c-------------------------------------------------------------------*/ lhsx(); x_solve_cell(); x_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell, then generate U(isize)=rhs[isize) c else assume U(isize) is loaded in un pack backsub_info c so just use it c after call u(istart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; for (i = grid_points[0]-2; i >= 0; i--) { #pragma omp for private(k,m,n) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_solve_cell(void) { /*-------------------------------------------------------------------- c performs guaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. 
c c assumed send happens outside this routine, but that c c'(IMAX) and rhs'(IMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i,j,k,isize; isize = grid_points[0]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for private(k) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(0,j,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[0][j][k][BB], lhs[0][j][k][CC], rhs[0][j][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (i = 1; i < isize; i++) { #pragma omp for private(k) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(i) = rhs(i) - A*rhs(i-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i-1][j][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(i) = B(i) - C(i-1)*A(i) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i-1][j][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for 
private(k) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(isize) = rhs(isize) - A*rhs(isize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[isize][j][k][AA], rhs[isize-1][j][k], rhs[isize][j][k]); /*-------------------------------------------------------------------- c B(isize) = B(isize) - C(isize-1)*A(isize) c-------------------------------------------------------------------*/ matmul_sub(lhs[isize][j][k][AA], lhs[isize-1][j][k][CC], lhs[isize][j][k][BB]); /*-------------------------------------------------------------------- c multiply rhs() by b_inverse() and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][k][BB], rhs[i][j][k] ); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c subtracts bvec=bvec - ablock*avec c-------------------------------------------------------------------*/ int i; for (i = 0; i < 5; i++) { /*-------------------------------------------------------------------- c rhs(i,ic,jc,kc,ccell) = rhs(i,ic,jc,kc,ccell) c $ - lhs[i,1,ablock,ia,ja,ka,acell)* c-------------------------------------------------------------------*/ bvec[i] = bvec[i] - ablock[i][0]*avec[0] - ablock[i][1]*avec[1] - ablock[i][2]*avec[2] - ablock[i][3]*avec[3] - ablock[i][4]*avec[4]; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matmul_sub(double ablock[5][5], double 
bblock[5][5], double cblock[5][5]) {

/*--------------------------------------------------------------------
c subtracts a(i,j,k) X b(i,j,k) from c(i,j,k)
c i.e. cblock -= ablock * bblock for 5x5 blocks, rows fully unrolled.
c-------------------------------------------------------------------*/
  int j;

  for (j = 0; j < 5; j++) {
    cblock[0][j] = cblock[0][j] - ablock[0][0]*bblock[0][j]
      - ablock[0][1]*bblock[1][j]
      - ablock[0][2]*bblock[2][j]
      - ablock[0][3]*bblock[3][j]
      - ablock[0][4]*bblock[4][j];
    cblock[1][j] = cblock[1][j] - ablock[1][0]*bblock[0][j]
      - ablock[1][1]*bblock[1][j]
      - ablock[1][2]*bblock[2][j]
      - ablock[1][3]*bblock[3][j]
      - ablock[1][4]*bblock[4][j];
    cblock[2][j] = cblock[2][j] - ablock[2][0]*bblock[0][j]
      - ablock[2][1]*bblock[1][j]
      - ablock[2][2]*bblock[2][j]
      - ablock[2][3]*bblock[3][j]
      - ablock[2][4]*bblock[4][j];
    cblock[3][j] = cblock[3][j] - ablock[3][0]*bblock[0][j]
      - ablock[3][1]*bblock[1][j]
      - ablock[3][2]*bblock[2][j]
      - ablock[3][3]*bblock[3][j]
      - ablock[3][4]*bblock[4][j];
    cblock[4][j] = cblock[4][j] - ablock[4][0]*bblock[0][j]
      - ablock[4][1]*bblock[1][j]
      - ablock[4][2]*bblock[2][j]
      - ablock[4][3]*bblock[3][j]
      - ablock[4][4]*bblock[4][j];
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {

/*--------------------------------------------------------------------
c Fully-unrolled Gauss-Jordan elimination on the 5x5 block `lhs`
c (no pivot search), applying the identical row operations to the
c coupling block `c` and the right-hand side `r`.  Net effect:
c c := lhs^{-1} * c and r := lhs^{-1} * r (the lhs diagonal itself
c is never rewritten).
c-------------------------------------------------------------------*/
  double pivot, coeff;

  /* --- normalize row 0 --- */
  pivot = 1.00/lhs[0][0];
  lhs[0][1] = lhs[0][1]*pivot;
  lhs[0][2] = lhs[0][2]*pivot;
  lhs[0][3] = lhs[0][3]*pivot;
  lhs[0][4] = lhs[0][4]*pivot;
  c[0][0] = c[0][0]*pivot;
  c[0][1] = c[0][1]*pivot;
  c[0][2] = c[0][2]*pivot;
  c[0][3] = c[0][3]*pivot;
  c[0][4] = c[0][4]*pivot;
  r[0] = r[0] *pivot;

  /* --- eliminate column 0 from rows 1..4 --- */
  coeff = lhs[1][0];
  lhs[1][1]= lhs[1][1] - coeff*lhs[0][1];
  lhs[1][2]= lhs[1][2] - coeff*lhs[0][2];
  lhs[1][3]= lhs[1][3] - coeff*lhs[0][3];
  lhs[1][4]= lhs[1][4] - coeff*lhs[0][4];
  c[1][0] = c[1][0] - coeff*c[0][0];
  c[1][1] = c[1][1] - coeff*c[0][1];
  c[1][2] = c[1][2] - coeff*c[0][2];
  c[1][3] = c[1][3] - coeff*c[0][3];
  c[1][4] = c[1][4] - coeff*c[0][4];
  r[1] = r[1] - coeff*r[0];

  coeff = lhs[2][0];
  lhs[2][1]= lhs[2][1] - coeff*lhs[0][1];
  lhs[2][2]= lhs[2][2] - coeff*lhs[0][2];
  lhs[2][3]= lhs[2][3] - coeff*lhs[0][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[0][4];
  c[2][0] = c[2][0] - coeff*c[0][0];
  c[2][1] = c[2][1] - coeff*c[0][1];
  c[2][2] = c[2][2] - coeff*c[0][2];
  c[2][3] = c[2][3] - coeff*c[0][3];
  c[2][4] = c[2][4] - coeff*c[0][4];
  r[2] = r[2] - coeff*r[0];

  coeff = lhs[3][0];
  lhs[3][1]= lhs[3][1] - coeff*lhs[0][1];
  lhs[3][2]= lhs[3][2] - coeff*lhs[0][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[0][3];
  lhs[3][4]= lhs[3][4] - coeff*lhs[0][4];
  c[3][0] = c[3][0] - coeff*c[0][0];
  c[3][1] = c[3][1] - coeff*c[0][1];
  c[3][2] = c[3][2] - coeff*c[0][2];
  c[3][3] = c[3][3] - coeff*c[0][3];
  c[3][4] = c[3][4] - coeff*c[0][4];
  r[3] = r[3] - coeff*r[0];

  coeff = lhs[4][0];
  lhs[4][1]= lhs[4][1] - coeff*lhs[0][1];
  lhs[4][2]= lhs[4][2] - coeff*lhs[0][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[0][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[0][4];
  c[4][0] = c[4][0] - coeff*c[0][0];
  c[4][1] = c[4][1] - coeff*c[0][1];
  c[4][2] = c[4][2] - coeff*c[0][2];
  c[4][3] = c[4][3] - coeff*c[0][3];
  c[4][4] = c[4][4] - coeff*c[0][4];
  r[4] = r[4] - coeff*r[0];

  /* --- normalize row 1 --- */
  pivot = 1.00/lhs[1][1];
  lhs[1][2] = lhs[1][2]*pivot;
  lhs[1][3] = lhs[1][3]*pivot;
  lhs[1][4] = lhs[1][4]*pivot;
  c[1][0] = c[1][0]*pivot;
  c[1][1] = c[1][1]*pivot;
  c[1][2] = c[1][2]*pivot;
  c[1][3] = c[1][3]*pivot;
  c[1][4] = c[1][4]*pivot;
  r[1] = r[1] *pivot;

  /* --- eliminate column 1 from rows 0,2,3,4 --- */
  coeff = lhs[0][1];
  lhs[0][2]= lhs[0][2] - coeff*lhs[1][2];
  lhs[0][3]= lhs[0][3] - coeff*lhs[1][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[1][4];
  c[0][0] = c[0][0] - coeff*c[1][0];
  c[0][1] = c[0][1] - coeff*c[1][1];
  c[0][2] = c[0][2] - coeff*c[1][2];
  c[0][3] = c[0][3] - coeff*c[1][3];
  c[0][4] = c[0][4] - coeff*c[1][4];
  r[0] = r[0] - coeff*r[1];

  coeff = lhs[2][1];
  lhs[2][2]= lhs[2][2] - coeff*lhs[1][2];
  lhs[2][3]= lhs[2][3] - coeff*lhs[1][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[1][4];
  c[2][0] = c[2][0] - coeff*c[1][0];
  c[2][1] = c[2][1] - coeff*c[1][1];
  c[2][2] = c[2][2] - coeff*c[1][2];
  c[2][3] = c[2][3] - coeff*c[1][3];
  c[2][4] = c[2][4] - coeff*c[1][4];
  r[2] = r[2] - coeff*r[1];

  coeff = lhs[3][1];
  lhs[3][2]= lhs[3][2] - coeff*lhs[1][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[1][3];
  lhs[3][4]= lhs[3][4] - coeff*lhs[1][4];
  c[3][0] = c[3][0] - coeff*c[1][0];
  c[3][1] = c[3][1] - coeff*c[1][1];
  c[3][2] = c[3][2] - coeff*c[1][2];
  c[3][3] = c[3][3] - coeff*c[1][3];
  c[3][4] = c[3][4] - coeff*c[1][4];
  r[3] = r[3] - coeff*r[1];

  coeff = lhs[4][1];
  lhs[4][2]= lhs[4][2] - coeff*lhs[1][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[1][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[1][4];
  c[4][0] = c[4][0] - coeff*c[1][0];
  c[4][1] = c[4][1] - coeff*c[1][1];
  c[4][2] = c[4][2] - coeff*c[1][2];
  c[4][3] = c[4][3] - coeff*c[1][3];
  c[4][4] = c[4][4] - coeff*c[1][4];
  r[4] = r[4] - coeff*r[1];

  /* --- normalize row 2 --- */
  pivot = 1.00/lhs[2][2];
  lhs[2][3] = lhs[2][3]*pivot;
  lhs[2][4] = lhs[2][4]*pivot;
  c[2][0] = c[2][0]*pivot;
  c[2][1] = c[2][1]*pivot;
  c[2][2] = c[2][2]*pivot;
  c[2][3] = c[2][3]*pivot;
  c[2][4] = c[2][4]*pivot;
  r[2] = r[2] *pivot;

  /* --- eliminate column 2 from rows 0,1,3,4 --- */
  coeff = lhs[0][2];
  lhs[0][3]= lhs[0][3] - coeff*lhs[2][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[2][4];
  c[0][0] = c[0][0] - coeff*c[2][0];
  c[0][1] = c[0][1] - coeff*c[2][1];
  c[0][2] = c[0][2] - coeff*c[2][2];
  c[0][3] = c[0][3] - coeff*c[2][3];
  c[0][4] = c[0][4] - coeff*c[2][4];
  r[0] = r[0] - coeff*r[2];

  coeff = lhs[1][2];
  lhs[1][3]= lhs[1][3] - coeff*lhs[2][3];
  lhs[1][4]= lhs[1][4] - coeff*lhs[2][4];
  c[1][0] = c[1][0] - coeff*c[2][0];
  c[1][1] = c[1][1] - coeff*c[2][1];
  c[1][2] = c[1][2] - coeff*c[2][2];
  c[1][3] = c[1][3] - coeff*c[2][3];
  c[1][4] = c[1][4] - coeff*c[2][4];
  r[1] = r[1] - coeff*r[2];

  coeff = lhs[3][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[2][3];
  lhs[3][4]= lhs[3][4] - coeff*lhs[2][4];
  c[3][0] = c[3][0] - coeff*c[2][0];
  c[3][1] = c[3][1] - coeff*c[2][1];
  c[3][2] = c[3][2] - coeff*c[2][2];
  c[3][3] = c[3][3] - coeff*c[2][3];
  c[3][4] = c[3][4] - coeff*c[2][4];
  r[3] = r[3] - coeff*r[2];

  coeff = lhs[4][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[2][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[2][4];
  c[4][0] = c[4][0] - coeff*c[2][0];
  c[4][1] = c[4][1] - coeff*c[2][1];
  c[4][2] = c[4][2] - coeff*c[2][2];
  c[4][3] = c[4][3] - coeff*c[2][3];
  c[4][4] = c[4][4] - coeff*c[2][4];
  r[4] = r[4] - coeff*r[2];

  /* --- normalize row 3 --- */
  pivot = 1.00/lhs[3][3];
  lhs[3][4] = lhs[3][4]*pivot;
  c[3][0] = c[3][0]*pivot;
  c[3][1] = c[3][1]*pivot;
  c[3][2] = c[3][2]*pivot;
  c[3][3] = c[3][3]*pivot;
  c[3][4] = c[3][4]*pivot;
  r[3] = r[3] *pivot;

  /* --- eliminate column 3 from rows 0,1,2,4 --- */
  coeff = lhs[0][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[3][4];
  c[0][0] = c[0][0] - coeff*c[3][0];
  c[0][1] = c[0][1] - coeff*c[3][1];
  c[0][2] = c[0][2] - coeff*c[3][2];
  c[0][3] = c[0][3] - coeff*c[3][3];
  c[0][4] = c[0][4] - coeff*c[3][4];
  r[0] = r[0] - coeff*r[3];

  coeff = lhs[1][3];
  lhs[1][4]= lhs[1][4] - coeff*lhs[3][4];
  c[1][0] = c[1][0] - coeff*c[3][0];
  c[1][1] = c[1][1] - coeff*c[3][1];
  c[1][2] = c[1][2] - coeff*c[3][2];
  c[1][3] = c[1][3] - coeff*c[3][3];
  c[1][4] = c[1][4] - coeff*c[3][4];
  r[1] = r[1] - coeff*r[3];

  coeff = lhs[2][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[3][4];
  c[2][0] = c[2][0] - coeff*c[3][0];
  c[2][1] = c[2][1] - coeff*c[3][1];
  c[2][2] = c[2][2] - coeff*c[3][2];
  c[2][3] = c[2][3] - coeff*c[3][3];
  c[2][4] = c[2][4] - coeff*c[3][4];
  r[2] = r[2] - coeff*r[3];

  coeff = lhs[4][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[3][4];
  c[4][0] = c[4][0] - coeff*c[3][0];
  c[4][1] = c[4][1] - coeff*c[3][1];
  c[4][2] = c[4][2] - coeff*c[3][2];
  c[4][3] = c[4][3] - coeff*c[3][3];
  c[4][4] = c[4][4] - coeff*c[3][4];
  r[4] = r[4] - coeff*r[3];

  /* --- normalize row 4, then back-eliminate into rows 0..3 --- */
  pivot = 1.00/lhs[4][4];
  c[4][0] = c[4][0]*pivot;
  c[4][1] = c[4][1]*pivot;
  c[4][2] = c[4][2]*pivot;
  c[4][3] = c[4][3]*pivot;
  c[4][4] = c[4][4]*pivot;
  r[4] = r[4] *pivot;

  coeff = lhs[0][4];
  c[0][0] = c[0][0] - coeff*c[4][0];
  c[0][1] = c[0][1] - coeff*c[4][1];
  c[0][2] = c[0][2] - coeff*c[4][2];
  c[0][3] = c[0][3] - coeff*c[4][3];
  c[0][4] = c[0][4] - coeff*c[4][4];
  r[0] = r[0] - coeff*r[4];

  coeff = lhs[1][4];
  c[1][0] = c[1][0] - coeff*c[4][0];
  c[1][1] = c[1][1] - coeff*c[4][1];
  c[1][2] = c[1][2] - coeff*c[4][2];
  c[1][3] = c[1][3] - coeff*c[4][3];
  c[1][4] = c[1][4] - coeff*c[4][4];
  r[1] = r[1] - coeff*r[4];

  coeff = lhs[2][4];
  c[2][0] = c[2][0] - coeff*c[4][0];
  c[2][1] = c[2][1] - coeff*c[4][1];
  c[2][2] = c[2][2] - coeff*c[4][2];
  c[2][3] = c[2][3] - coeff*c[4][3];
  c[2][4] = c[2][4] - coeff*c[4][4];
  r[2] = r[2] - coeff*r[4];

  coeff = lhs[3][4];
  c[3][0] = c[3][0] - coeff*c[4][0];
  c[3][1] = c[3][1] - coeff*c[4][1];
  c[3][2] = c[3][2] - coeff*c[4][2];
  c[3][3] = c[3][3] - coeff*c[4][3];
  c[3][4] = c[3][4] - coeff*c[4][4];
  r[3] = r[3] - coeff*r[4];
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void binvrhs( double lhs[5][5], double r[5] ) {

/*--------------------------------------------------------------------
c Same unrolled Gauss-Jordan sweep as binvcrhs, applied to the
c right-hand side vector only: net effect r := lhs^{-1} * r.
c-------------------------------------------------------------------*/
  double pivot, coeff;

  /* --- normalize row 0 --- */
  pivot = 1.00/lhs[0][0];
  lhs[0][1] = lhs[0][1]*pivot;
  lhs[0][2] = lhs[0][2]*pivot;
  lhs[0][3] = lhs[0][3]*pivot;
  lhs[0][4] = lhs[0][4]*pivot;
  r[0] = r[0] *pivot;

  /* --- eliminate column 0 from rows 1..4 --- */
  coeff = lhs[1][0];
  lhs[1][1]= lhs[1][1] - coeff*lhs[0][1];
  lhs[1][2]= lhs[1][2] - coeff*lhs[0][2];
  lhs[1][3]= lhs[1][3] - coeff*lhs[0][3];
  lhs[1][4]= lhs[1][4] - coeff*lhs[0][4];
  r[1] = r[1] - coeff*r[0];

  coeff = lhs[2][0];
  lhs[2][1]= lhs[2][1] - coeff*lhs[0][1];
  lhs[2][2]= lhs[2][2] - coeff*lhs[0][2];
  lhs[2][3]= lhs[2][3] - coeff*lhs[0][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[0][4];
  r[2] =
r[2] - coeff*r[0];

  coeff = lhs[3][0];
  lhs[3][1]= lhs[3][1] - coeff*lhs[0][1];
  lhs[3][2]= lhs[3][2] - coeff*lhs[0][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[0][3];
  lhs[3][4]= lhs[3][4] - coeff*lhs[0][4];
  r[3] = r[3] - coeff*r[0];

  coeff = lhs[4][0];
  lhs[4][1]= lhs[4][1] - coeff*lhs[0][1];
  lhs[4][2]= lhs[4][2] - coeff*lhs[0][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[0][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[0][4];
  r[4] = r[4] - coeff*r[0];

  /* --- normalize row 1 --- */
  pivot = 1.00/lhs[1][1];
  lhs[1][2] = lhs[1][2]*pivot;
  lhs[1][3] = lhs[1][3]*pivot;
  lhs[1][4] = lhs[1][4]*pivot;
  r[1] = r[1] *pivot;

  /* --- eliminate column 1 from rows 0,2,3,4 --- */
  coeff = lhs[0][1];
  lhs[0][2]= lhs[0][2] - coeff*lhs[1][2];
  lhs[0][3]= lhs[0][3] - coeff*lhs[1][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[1][4];
  r[0] = r[0] - coeff*r[1];

  coeff = lhs[2][1];
  lhs[2][2]= lhs[2][2] - coeff*lhs[1][2];
  lhs[2][3]= lhs[2][3] - coeff*lhs[1][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[1][4];
  r[2] = r[2] - coeff*r[1];

  coeff = lhs[3][1];
  lhs[3][2]= lhs[3][2] - coeff*lhs[1][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[1][3];
  lhs[3][4]= lhs[3][4] - coeff*lhs[1][4];
  r[3] = r[3] - coeff*r[1];

  coeff = lhs[4][1];
  lhs[4][2]= lhs[4][2] - coeff*lhs[1][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[1][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[1][4];
  r[4] = r[4] - coeff*r[1];

  /* --- normalize row 2 --- */
  pivot = 1.00/lhs[2][2];
  lhs[2][3] = lhs[2][3]*pivot;
  lhs[2][4] = lhs[2][4]*pivot;
  r[2] = r[2] *pivot;

  /* --- eliminate column 2 from rows 0,1,3,4 --- */
  coeff = lhs[0][2];
  lhs[0][3]= lhs[0][3] - coeff*lhs[2][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[2][4];
  r[0] = r[0] - coeff*r[2];

  coeff = lhs[1][2];
  lhs[1][3]= lhs[1][3] - coeff*lhs[2][3];
  lhs[1][4]= lhs[1][4] - coeff*lhs[2][4];
  r[1] = r[1] - coeff*r[2];

  coeff = lhs[3][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[2][3];
  lhs[3][4]= lhs[3][4] - coeff*lhs[2][4];
  r[3] = r[3] - coeff*r[2];

  coeff = lhs[4][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[2][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[2][4];
  r[4] = r[4] - coeff*r[2];

  /* --- normalize row 3 --- */
  pivot = 1.00/lhs[3][3];
  lhs[3][4] = lhs[3][4]*pivot;
  r[3] = r[3] *pivot;

  /* --- eliminate column 3 from rows 0,1,2,4 --- */
  coeff = lhs[0][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[3][4];
  r[0] = r[0] - coeff*r[3];

  coeff = lhs[1][3];
  lhs[1][4]= lhs[1][4] - coeff*lhs[3][4];
  r[1] = r[1] - coeff*r[3];

  coeff = lhs[2][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[3][4];
  r[2] = r[2] - coeff*r[3];

  coeff = lhs[4][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[3][4];
  r[4] = r[4] - coeff*r[3];

  /* --- normalize row 4, then back-eliminate into rows 0..3 --- */
  pivot = 1.00/lhs[4][4];
  r[4] = r[4] *pivot;

  coeff = lhs[0][4];
  r[0] = r[0] - coeff*r[4];

  coeff = lhs[1][4];
  r[1] = r[1] - coeff*r[4];

  coeff = lhs[2][4];
  r[2] = r[2] - coeff*r[4];

  coeff = lhs[3][4];
  r[3] = r[3] - coeff*r[4];
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void y_solve(void) {

/*--------------------------------------------------------------------
c Performs line solves in Y direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c-------------------------------------------------------------------*/
  lhsy();             /* form block-tridiagonal system for Y sweep */
  y_solve_cell();     /* forward elimination */
  y_backsubstitute(); /* back substitution */
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void y_backsubstitute(void) {

/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(jsize)=rhs(jsize)
c else assume U(jsize) is loaded in un pack backsub_info
c so just use it
c after call u(jstart) will be sent to next cell
c-------------------------------------------------------------------*/
  int i, j, k, m, n;

  /* Sweep backwards in j; the i loop is an orphaned OpenMP worksharing
     construct (assumes an enclosing parallel region -- confirm caller). */
  for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for private(k,m,n)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < BLOCK_SIZE; m++) {
	  for (n = 0; n < BLOCK_SIZE; n++) {
	    /* rhs(j) -= C(j) * rhs(j+1) */
	    rhs[i][j][k][m] = rhs[i][j][k][m]
	      - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];
	  }
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void y_solve_cell(void) {

/*--------------------------------------------------------------------
c performs gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(JMAX) and rhs'(JMAX) will be sent to next cell
c-------------------------------------------------------------------*/
  int i, j, k, jsize;

  jsize = grid_points[1]-1;

#pragma omp for private(k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c multiply c(i,0,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[i][0][k][BB], lhs[i][0][k][CC], rhs[i][0][k] );
    }
  }

/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (j = 1; j < jsize; j++) {
#pragma omp for private(k)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c subtract A*lhs_vector(j-1) from lhs_vector(j)
c
c rhs(j) = rhs(j) - A*rhs(j-1)
c-------------------------------------------------------------------*/
	matvec_sub(lhs[i][j][k][AA], rhs[i][j-1][k], rhs[i][j][k]);

/*--------------------------------------------------------------------
c B(j) = B(j) - C(j-1)*A(j)
c-------------------------------------------------------------------*/
	matmul_sub(lhs[i][j][k][AA], lhs[i][j-1][k][CC], lhs[i][j][k][BB]);

/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs
c-------------------------------------------------------------------*/
	binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] );
      }
    }
  }

  /* Finish up the special case for the last cell (j == jsize). */
#pragma omp for private(k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[i][jsize][k][AA], rhs[i][jsize-1][k], rhs[i][jsize][k]);

/*--------------------------------------------------------------------
c B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
c call matmul_sub(aa,i,jsize,k,c,
c $ cc,i,jsize-1,k,c,BB,i,jsize,k)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[i][jsize][k][AA], lhs[i][jsize-1][k][CC], lhs[i][jsize][k][BB]);

/*--------------------------------------------------------------------
c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
c-------------------------------------------------------------------*/
      binvrhs( lhs[i][jsize][k][BB], rhs[i][jsize][k] );
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_solve(void) {

/*--------------------------------------------------------------------
c Performs line solves in Z direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c-------------------------------------------------------------------*/
  lhsz();             /* form block-tridiagonal system for Z sweep */
  z_solve_cell();     /* forward elimination */
  z_backsubstitute(); /* back substitution */
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_backsubstitute(void) {

/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(ksize)=rhs(ksize)
c else assume U(ksize) is loaded in un pack backsub_info
c so just use it
c after call u(kstart) will be sent to next cell
c-------------------------------------------------------------------*/
  int i, j, k, m, n;

  /* Unlike x/y, the whole backwards-k recurrence lives inside a single
     work-shared i iteration: each (i,j) line is independent along k. */
#pragma omp for private(j,k,m,n)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = grid_points[2]-2; k >= 0; k--) {
	for (m = 0; m < BLOCK_SIZE; m++) {
	  for (n = 0; n < BLOCK_SIZE; n++) {
	    /* rhs(k) -= C(k) * rhs(k+1) */
	    rhs[i][j][k][m] = rhs[i][j][k][m]
	      - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];
	  }
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_solve_cell(void) {

/*--------------------------------------------------------------------
c performs gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(KMAX) and rhs'(KMAX) will be sent to next cell.
c-------------------------------------------------------------------*/
  int i,j,k,ksize;

  ksize = grid_points[2]-1;

/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
#pragma omp for private(j)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {

/*--------------------------------------------------------------------
c multiply c(i,j,0) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[i][j][0][BB], lhs[i][j][0][CC], rhs[i][j][0] );
    }
  }

/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (k = 1; k < ksize; k++) {
#pragma omp for private(j)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (j = 1; j < grid_points[1]-1; j++) {

/*--------------------------------------------------------------------
c subtract A*lhs_vector(k-1) from lhs_vector(k)
c
c rhs(k) = rhs(k) - A*rhs(k-1)
c-------------------------------------------------------------------*/
	matvec_sub(lhs[i][j][k][AA], rhs[i][j][k-1], rhs[i][j][k]);

/*--------------------------------------------------------------------
c B(k) = B(k) - C(k-1)*A(k)
c call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k)
c-------------------------------------------------------------------*/
	matmul_sub(lhs[i][j][k][AA], lhs[i][j][k-1][CC], lhs[i][j][k][BB]);

/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs
c-------------------------------------------------------------------*/
	binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] );
      }
    }
  }

/*--------------------------------------------------------------------
c Now finish up special cases for last cell
c-------------------------------------------------------------------*/
#pragma omp for private(j)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {

/*--------------------------------------------------------------------
c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[i][j][ksize][AA], rhs[i][j][ksize-1], rhs[i][j][ksize]);

/*--------------------------------------------------------------------
c B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
c call matmul_sub(aa,i,j,ksize,c,
c $ cc,i,j,ksize-1,c,BB,i,j,ksize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[i][j][ksize][AA], lhs[i][j][ksize-1][CC], lhs[i][j][ksize][BB]);

/*--------------------------------------------------------------------
c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
c-------------------------------------------------------------------*/
      binvrhs( lhs[i][j][ksize][BB], rhs[i][j][ksize] );
    }
  }
}
asm-1.c
/* PR middle-end/30263 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */

/* Regression test: "=m"/"m"/"mr" asm operands referring to shared,
   private and firstprivate variables inside an OpenMP parallel region
   must gimplify without an ICE.  Compile-only; nothing is executed.
   NOTE(review): omp_get_thread_num is used without a prototype here --
   presumably intentional for this compile-only test. */
void
foo (void)
{
  int s0, s1 = 5, s2 = 6;
  int p0, p1, p2;
  int f0 = 4, f1 = 5, f2 = 6;
#pragma omp parallel shared (s0, s1, s2) private (p0, p1, p2) \
		     firstprivate (f0, f1, f2)
  {
    /* Private copies: each thread touches only its own storage. */
    asm ("" : "=m" (p0) : "m" (p1), "mr" (p2));
    /* Shared variables: restrict the asm to one thread. */
    if (omp_get_thread_num () == 0)
      asm ("" : "=m" (s0) : "m" (s1), "mr" (s2));
    /* Firstprivate copies. */
    asm ("" : "=m" (f0) : "m" (f1), "mr" (f2));
  }
}
hitting.h
/** Pasha: Parallel Algorithms for Approximating Compact Universal Hitting Sets hitting.h Header file for main operations for different options of hitting set calculations. @author Baris Ekim @version 1.0 4/15/19 */ #ifndef HITTING_H #define HITTING_H #include "decycling.h" #include "graph.h" #include <cstdlib> #include <iomanip> #include <algorithm> #include <omp.h> int graph::Hitting(int L, string hittingFile) { /** Performs hitting set calculations without parallelization or randomization, counting L-k+1-long paths. @param L: Sequence length, hittingFile: Output file destination. @return hittingCount: Size of hitting set. */ vertexExp = pow(ALPHABET_SIZE, k-1); int imaxHittingNum = -1; ofstream hittingStream; int hittingCount = 0; l = L-k+1; hittingNumArray = new double[edgeNum]; used = new bool[vertexExp]; finished = new bool[vertexExp]; topoSort = new byte[vertexExp]; D = new long double*[l + 1]; long double* Dpool = new long double[(l+1)* vertexExp]; for(int i = 0; i < l+1; i++, Dpool += vertexExp) D[i] = Dpool; hittingStream.open(hittingFile); F = new long double*[l + 1]; long double* Fpool = new long double[(l+1)* vertexExp]; for(int i = 0; i < l+1; i++, Fpool += vertexExp) F[i] = Fpool; while (calculatePathsSeq(l)) { int imaxHittingNum = calculateHittingNumber(l); if (imaxHittingNum < 0) break; removeEdge(imaxHittingNum); string label = getLabel(imaxHittingNum); hittingStream << label << "\n"; hittingCount++; } hittingStream.close(); delete [] *D; delete [] D; delete [] *F; delete [] F; topologicalSort(); cout << "Length of longest remaining path: " << maxLength() << "\n"; return hittingCount; } int graph::HittingAny(int L, int x, string hittingFile) { /** Performs hitting set calculations without parallelization or randomization, counting paths of all length. @param L: Sequence length, x: Number of vertices, hittingFile: Output file destination. @return hittingCount: Size of hitting set. 
    */
    vertexExp = pow(ALPHABET_SIZE, k-1);
    ofstream hittingStream;
    byte* imaxHittingNum;  // batch of x edge indices returned per iteration
    int hittingCount = 0;
    l = L-k+1;
    hittingNumAnyArray = new double[edgeNum];
    used = new bool[vertexExp];
    finished = new bool[vertexExp];
    topoSort = new byte[vertexExp];
    hittingStream.open(hittingFile);
    topologicalSort();
    // Only one DP row is needed when counting paths of all lengths.
    D = new long double*[1];
    long double* Dpool = new long double[(1)* vertexExp];
    for(int i = 0; i < 1; i++, Dpool += vertexExp) D[i] = Dpool;
    F = new long double*[1];
    long double* Fpool = new long double[(1)* vertexExp];
    for(int i = 0; i < 1; i++, Fpool += vertexExp) F[i] = Fpool;
    // Remove x edges per round until no path of length >= l remains.
    while (maxLength() >= l) {
        calculatePathsAny();
        imaxHittingNum = calculateHittingNumberAny(x);
        for (int i = 0; i < x; i++) {
            removeEdge(imaxHittingNum[i]);
            string label = getLabel(imaxHittingNum[i]);
            hittingStream << label << "\n";
            hittingCount++;
        }
    }
    hittingStream.close();
    delete [] *D;
    delete [] D;
    delete [] *F;
    delete [] F;
    topologicalSort();
    cout << "Length of longest remaining path: " << maxLength() << "\n";
    return hittingCount;
}
int graph::HittingParallel(int L, string hittingFile, int threads) {
    /**
    Performs hitting set calculations with parallelization and without randomization, counting L-k+1-long paths.
    @param L: Sequence length, hittingFile: Output file destination.
    @return hittingCount: Size of hitting set.
    */
    vertexExp = pow(ALPHABET_SIZE, k-1);
    // NOTE(review): shadowed by the loop-local declaration below; never read.
    int imaxHittingNum = -1;
    ofstream hittingStream;
    int hittingCount = 0;
    l = L-k+1;
    hittingNumArray = new double[edgeNum];
    stageArray = new byte[edgeNum];
    used = new bool[vertexExp];
    finished = new bool[vertexExp];
    topoSort = new byte[vertexExp];
    // Contiguous-pool DP tables, as in graph::Hitting.
    D = new long double*[l + 1];
    long double* Dpool = new long double[(l+1)* vertexExp];
    for(int i = 0; i < l+1; i++, Dpool += vertexExp) D[i] = Dpool;
    hittingStream.open(hittingFile);
    F = new long double*[l + 1];
    long double* Fpool = new long double[(l+1)* vertexExp];
    for(int i = 0; i < l+1; i++, Fpool += vertexExp) F[i] = Fpool;
    // Same greedy loop as graph::Hitting, but path counting and hitting-number
    // selection are done with `threads` OpenMP threads.
    while (calculatePaths(l, threads)) {
        int imaxHittingNum = calculateHittingNumberParallel(l, false, threads);
        if (imaxHittingNum < 0) break;
        removeEdge(imaxHittingNum);
        string label = getLabel(imaxHittingNum);
        hittingStream << label << "\n";
        hittingCount++;
    }
    hittingStream.close();
    delete [] *D;
    delete [] D;
    delete [] *F;
    delete [] F;
    topologicalSort();
    cout << "Length of longest remaining path: " << maxLength() << "\n";
    return hittingCount;
}
int graph::HittingRandomParallel(int L, string hittingFile, int threads) {
    /**
    Performs hitting set calculations with parallelization and with randomization, counting L-k+1-long paths.
    @param L: Sequence length, hittingFile: Output file destination.
    @return hittingCount: Size of hitting set.
*/ omp_set_dynamic(0); vertexExp = pow(ALPHABET_SIZE, k-1); ofstream hittingStream; int hittingCount = 0; l = L-k+1; epsilon = 0.1; delta = 1/(double)l; if (l <= 200) { delta = 0.1; epsilon = 0.1; } double alpha = 1 - 4*delta -2*epsilon; cout << "Alpha: " << 1/alpha << endl; cout << "Delta: " << delta << endl; cout << "Epsilon: " << epsilon << endl; int i; int j; hittingNumArray = new double[edgeNum]; stageArray = new byte[edgeNum]; used = new bool[vertexExp]; finished = new bool[vertexExp]; pick = new bool[edgeNum]; topoSort = new byte[vertexExp]; D = new long double*[l + 1]; long double* Dpool = new long double[(l+1)* vertexExp]; for(int i = 0; i < l+1; i++, Dpool += vertexExp) D[i] = Dpool; hittingStream.open(hittingFile); F = new long double*[l + 1]; long double* Fpool = new long double[(l+1)* vertexExp]; for(int i = 0; i < l+1; i++, Fpool += vertexExp) F[i] = Fpool; calculatePaths(l, threads); int imaxHittingNum = calculateHittingNumberParallel(l, false, threads); h = findLog((1.0+epsilon), hittingNumArray[imaxHittingNum]); double prob = delta/l; while (h > 0) { total = 0; int hittingCountStage = 0; double pathCountStage = 0; calculatePaths(l, threads); if (calculateHittingNumberParallel(l, true, threads) < 0) break; stageVertices = pushBackVector(); #pragma omp parallel for num_threads(threads) for (int it = 0; it < stageVertices.size(); it++) { i = stageVertices[it]; #pragma omp critical if ((pick[i] == false) && (hittingNumArray[i] > (pow(delta, 3) * total))) { stageArray[i] = 0; pick[i] = true; hittingCountStage++; pathCountStage += hittingNumArray[i]; } } #pragma omp parallel for collapse (2) num_threads(threads) for (int it = 0; it < stageVertices.size(); it++) { for (int jt = 0; jt < stageVertices.size(); jt++) { i = stageVertices[it]; #pragma omp critical if (pick[i] == false) { if (((double) rand() / (RAND_MAX)) <= prob) { stageArray[i] = 0; pick[i] = true; hittingCountStage += 1; pathCountStage += hittingNumArray[i]; } j = stageVertices[jt]; if 
(pick[j] == false) { if (((double) rand() / (RAND_MAX)) <= prob) { stageArray[j] = 0; pick[j] = true; hittingCountStage += 1; pathCountStage += hittingNumArray[j]; } } } } } hittingCount += hittingCountStage; if (pathCountStage >= hittingCountStage * pow((1.0 + epsilon), h) * (1 - 6*delta - 2*epsilon)) { for (int it = 0; it < stageVertices.size(); it++) { i = stageVertices[it]; if (pick[i] == true) { removeEdge(i); string label = getLabel(i); hittingStream << label << "\n"; } } h--; } else hittingCount -= hittingCountStage; } hittingStream.close(); delete [] *D; delete [] D; delete [] *F; delete [] F; topologicalSort(); cout << "Length of longest remaining path: " << maxLength() << "\n"; return hittingCount; } #endif
array_args.h
#ifndef LIGHTGBM_UTILS_ARRAY_AGRS_H_
#define LIGHTGBM_UTILS_ARRAY_AGRS_H_
// NOTE(review): "AGRS" in the include guard looks like a typo for "ARGS";
// harmless as long as it stays consistent with the #endif comment below.
#include <vector>
#include <algorithm>
#include <LightGBM/utils/openmp_wrapper.h>
namespace LightGBM {
/*!
* \brief Contains some operation for a array, e.g. ArgMax, TopK.
*/
template<typename VAL_T>
class ArrayArgs {
 public:
  // Multi-threaded argmax: each thread scans one contiguous chunk of the
  // array, then the per-thread winners are reduced serially.
  inline static size_t ArgMaxMT(const std::vector<VAL_T>& array) {
    int num_threads = 1;
    #pragma omp parallel
    #pragma omp master
    {
      num_threads = omp_get_num_threads();
    }
    // Chunk size, rounded up so the chunks cover the whole array.
    int step = std::max(1, (static_cast<int>(array.size()) + num_threads - 1) / num_threads);
    std::vector<size_t> arg_maxs(num_threads, 0);
    #pragma omp parallel for schedule(static,1)
    for (int i = 0; i < num_threads; ++i) {
      size_t start = step * i;
      if (start >= array.size()) { continue; }
      size_t end = std::min(array.size(), start + step);
      size_t arg_max = start;
      for (size_t j = start + 1; j < end; ++j) {
        if (array[j] > array[arg_max]) { arg_max = j; }
      }
      arg_maxs[i] = arg_max;
    }
    // Serial reduction over the per-thread candidates.
    size_t ret = arg_maxs[0];
    for (int i = 1; i < num_threads; ++i) {
      if (array[arg_maxs[i]] > array[ret]) { ret = arg_maxs[i]; }
    }
    return ret;
  }
  // Index of the maximum element; returns 0 for an empty vector.  Falls back
  // to the multi-threaded version only when the array is large enough to pay
  // for the parallel overhead.
  inline static size_t ArgMax(const std::vector<VAL_T>& array) {
    if (array.empty()) { return 0; }
    if (array.size() > 100) {
      return ArgMaxMT(array);
    } else {
      size_t arg_max = 0;
      for (size_t i = 1; i < array.size(); ++i) {
        if (array[i] > array[arg_max]) { arg_max = i; }
      }
      return arg_max;
    }
  }
  // Index of the minimum element; returns 0 for an empty vector.
  inline static size_t ArgMin(const std::vector<VAL_T>& array) {
    if (array.empty()) { return 0; }
    size_t arg_min = 0;
    for (size_t i = 1; i < array.size(); ++i) {
      if (array[i] < array[arg_min]) { arg_min = i; }
    }
    return arg_min;
  }
  // Raw-pointer overload of ArgMax; returns 0 when n == 0.
  inline static size_t ArgMax(const VAL_T* array, size_t n) {
    if (n <= 0) { return 0; }
    size_t arg_max = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] > array[arg_max]) { arg_max = i; }
    }
    return arg_max;
  }
  // Raw-pointer overload of ArgMin; returns 0 when n == 0.
  inline static size_t ArgMin(const VAL_T* array, size_t n) {
    if (n <= 0) { return 0; }
    size_t arg_min = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] < array[arg_min]) { arg_min = i; }
    }
    return arg_min;
  }
  // Three-way partition (Bentley–McIlroy style) around the pivot ref[end-1],
  // ordering DESCENDING: on return, elements in (start..*l] are > pivot,
  // [*l+1..*r-1] are == pivot, and [*r..end) are < pivot.
  inline static void Partition(std::vector<VAL_T>* arr, int start, int end, int* l, int* r) {
    int i = start - 1;
    int j = end - 1;
    int p = i;
    int q = j;
    if (start >= end) { return; }
    std::vector<VAL_T>& ref = *arr;
    VAL_T v = ref[end - 1];
    for (;;) {
      while (ref[++i] > v);
      while (v > ref[--j]) { if (j == start) { break; } }
      if (i >= j) { break; }
      std::swap(ref[i], ref[j]);
      // Move pivot-equal elements to the ends; swapped back to the middle below.
      if (ref[i] == v) { p++; std::swap(ref[p], ref[i]); }
      if (v == ref[j]) { q--; std::swap(ref[j], ref[q]); }
    }
    std::swap(ref[i], ref[end - 1]);
    j = i - 1;
    i = i + 1;
    for (int k = start; k <= p; k++, j--) { std::swap(ref[k], ref[j]); }
    for (int k = end - 2; k >= q; k--, i++) { std::swap(ref[i], ref[k]); }
    *l = j;
    *r = i;
  };
  // Quickselect on the descending partition: after the call, position k holds
  // the (k+1)-th largest value and everything before it is >= it.
  inline static int ArgMaxAtK(std::vector<VAL_T>* arr, int start, int end, int k) {
    if (start >= end - 1) { return start; }
    int l = start;
    int r = end - 1;
    Partition(arr, start, end, &l, &r);
    // k already inside the pivot-equal band (or band spans the range): done.
    if ((k > l && k < r) || l == 0 || r == end - 1) {
      return k;
    } else if (k <= l) {
      return ArgMaxAtK(arr, start, l, k);
    } else {
      return ArgMaxAtK(arr, r, end, k);
    }
  }
  // Copies `array` into *out and truncates it to its k largest values
  // (unordered).  k <= 0 yields an empty output; k >= size copies everything.
  inline static void MaxK(const std::vector<VAL_T>& array, int k, std::vector<VAL_T>* out) {
    out->clear();
    if (k <= 0) { return; }
    for (auto val : array) { out->push_back(val); }
    if (static_cast<size_t>(k) >= array.size()) { return; }
    ArgMaxAtK(out, 0, static_cast<int>(out->size()), k - 1);
    out->erase(out->begin() + k, out->end());
  }
};
}  // namespace LightGBM
#endif   // LightGBM_UTILS_ARRAY_AGRS_H_
GB_unaryop__lnot_uint32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_int64
// op(A') function: GB_tran__lnot_uint32_int64
// C type: uint32_t
// A type: int64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
    int64_t
#define GB_CTYPE \
    uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator: logical NOT (1 if aij is zero, 0 otherwise)
#define GB_OP(z, x) \
    z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint32_int64
(
    uint32_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: one independent cast+op per entry
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via this template include
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ZBinDumper.h
/*
 * ZBinDumper.h
 * Cubism
 *
 * Created by Panos Hadjidoukas on 3/18/14.
 * Copyright 2014 CSE Lab, ETH Zurich. All rights reserved.
 *
 */
#pragma once
#include <iostream>
#include <vector>
#include <string>
#include <stdio.h>
#include <sstream>
#include "BlockInfo.h"
#include "LosslessCompression.h"
CUBISM_NAMESPACE_BEGIN

// Fixed-size file header: one compressed-size slot per channel (max 8).
typedef struct _header_serial {
    long size[8];
} header_serial;

// The following requirements for the data TStreamer are required:
// TStreamer::NCHANNELS : Number of data elements (1=Scalar, 3=Vector, 9=Tensor)
// TStreamer::operate : Data access methods for read and write
// TStreamer::getAttributeName : Attribute name of the date ("Scalar", "Vector", "Tensor")

// Serially dumps the grid to <dump_path>/<f_name>.zbin: a header_serial of
// per-channel compressed sizes, followed by each channel's ZZcompress'd data.
template <typename TStreamer, typename TGrid>
void DumpZBin(const TGrid &grid, const int iCounter, const typename TGrid::Real t, const std::string &f_name, const std::string &dump_path = ".", const bool bDummy = false)
{
    typedef typename TGrid::BlockType B;
    // f_name is the base filename without file type extension
    std::ostringstream filename;
    filename << dump_path << "/" << f_name;
    FILE *file_id;
    int status;
    static const unsigned int NCHANNELS = TStreamer::NCHANNELS;
    const unsigned int NX = grid.getBlocksPerDimension(0)*B::sizeX;
    const unsigned int NY = grid.getBlocksPerDimension(1)*B::sizeY;
    const unsigned int NZ = grid.getBlocksPerDimension(2)*B::sizeZ;
    Real memsize = (NX * NY * NZ * sizeof(Real))/(1024.*1024.*1024.);
    std::cout << "Allocating " << memsize << " GB of BIN data" << std::endl;
    // One channel's worth of data; reused (and re-compressed in place) per channel.
    Real * array_all = new Real[NX * NY * NZ];
    std::vector<BlockInfo> vInfo_local = grid.getBlocksInfo();
    static const unsigned int sX = 0;
    static const unsigned int sY = 0;
    static const unsigned int sZ = 0;
    static const unsigned int eX = B::sizeX;
    static const unsigned int eY = B::sizeY;
    static const unsigned int eZ = B::sizeZ;
    // NOTE(review): binary data opened with mode "w" — should presumably be
    // "wb" for portability (Windows translates newlines in text mode);
    // fopen's result is also unchecked.  TODO confirm.
    file_id = fopen((filename.str()+".zbin").c_str(), "w");
    header_serial tag;
    // Reserve room for the header; actual sizes are written after compression.
    fseek(file_id, sizeof(tag), SEEK_SET);
    for (unsigned int ichannel = 0; ichannel < NCHANNELS; ichannel++)
    {
        // Gather this channel from all blocks into the flat x-major buffer.
        #pragma omp parallel for
        for(unsigned int i=0; i<vInfo_local.size(); i++)
        {
            BlockInfo& info = vInfo_local[i];
            const unsigned int idx[3] = {info.index[0], info.index[1], info.index[2]};
            B & b = *(B*)info.ptrBlock;
            for(unsigned int ix=sX; ix<eX; ix++)
            {
                const unsigned int gx = idx[0]*B::sizeX + ix;
                for(unsigned int iy=sY; iy<eY; iy++)
                {
                    const unsigned int gy = idx[1]*B::sizeY + iy;
                    for(unsigned int iz=sZ; iz<eZ; iz++)
                    {
                        const unsigned int gz = idx[2]*B::sizeZ + iz;
                        assert((gz + NZ * (gy + NY * gx)) < NX * NY * NZ);
                        Real * const ptr = array_all + (gz + NZ * (gy + NY * gx));
                        Real output;
                        TStreamer::operate(b, ix, iy, iz, &output, ichannel); // point -> output,
                        ptr[0] = output;
                    }
                }
            }
        }
        // long local_count = NX * NY * NZ * NCHANNELS;
        long local_count = NX * NY * NZ * 1;
        long local_bytes = local_count * sizeof(Real);
        unsigned int max = local_bytes;
        // int layout[4] = {NCHANNELS, NX, NY, NZ};
        int layout[4] = {NX, NY, NZ, 1};
        long compressed_bytes = ZZcompress<typename TGrid::Real>((unsigned char *)array_all, local_bytes, layout, &max); // "in place"
        printf("Writing %ld bytes of Compressed data (cr = %.2f)\n", compressed_bytes, NX*NY*NZ*sizeof(Real)*NCHANNELS*1.0/compressed_bytes);
        tag.size[ichannel] = compressed_bytes;
        // NOTE(review): fwrite result is stored but never checked.
        size_t wb_data = fwrite(array_all, 1, compressed_bytes, file_id);
    }
    // Rewind and fill in the header with the per-channel compressed sizes.
    fseek(file_id, 0, SEEK_SET);
    size_t wb_header = fwrite(&tag.size[0], 1, sizeof(tag), file_id);
    status = fclose(file_id);
    delete [] array_all;
}

// Reads a .zbin file written by DumpZBin and scatters each channel back into
// the grid blocks via TStreamer::operate.
template <typename TStreamer, typename TGrid>
void ReadZBin(TGrid &grid, const std::string& f_name, const std::string& read_path=".")
{
    typedef typename TGrid::BlockType B;
    typedef typename TGrid::Real Real;
    // f_name is the base filename without file type extension
    std::ostringstream filename;
    filename << read_path << "/" << f_name;
    int status;
    FILE *file_id;
    const int NX = grid.getBlocksPerDimension(0)*B::sizeX;
    const int NY = grid.getBlocksPerDimension(1)*B::sizeY;
    const int NZ = grid.getBlocksPerDimension(2)*B::sizeZ;
    static const int NCHANNELS = TStreamer::NCHANNELS;
    Real * array_all = new Real[NX * NY * NZ * NCHANNELS];
    std::vector<BlockInfo> vInfo_local = grid.getBlocksInfo();
    static const int sX = 0;
    static const int sY = 0;
    static const int sZ = 0;
    const int eX = B::sizeX;
    const int eY = B::sizeY;
    const int eZ = B::sizeZ;
    file_id = fopen((filename.str()+".zbin").c_str(), "rb");
    long local_count = NX * NY * NZ * 1;
    long local_bytes = local_count * sizeof(Real);
    header_serial tag;
    size_t rb_header = fread(&tag.size[0], 1, sizeof(tag), file_id);
#if DBG
    // NOTE(review): `rank` is not declared in this serial reader — this
    // branch will not compile if DBG is enabled; likely copied from the MPI
    // variant.  TODO confirm.
    printf("HEADER(%d):\n", rank);
    for (int i = 0; i < NCHANNELS; i++)
    {
        printf("channel %d: %ld\n", i, tag.size[i]);
    }
#endif
    for (unsigned int ichannel = 0; ichannel < NCHANNELS; ichannel++)
    {
#if DBG
        printf("compr. size = %ld\n", tag.size[ichannel]); fflush(0);
#endif
        long compressed_bytes = tag.size[ichannel];
#if DBG
        printf("Reading %ld bytes of Compressed data (cr = %.2f)\n", compressed_bytes, local_bytes*1.0/compressed_bytes);
#endif
        unsigned char *tmp = (unsigned char *) malloc(compressed_bytes+4096);
        size_t rb_data = fread(tmp, 1, compressed_bytes, file_id);
        int layout[4] = {NX, NY, NZ, 1};
        size_t decompressed_bytes = ZZdecompress<typename TGrid::Real>(tmp, compressed_bytes, layout, (unsigned char *)array_all, local_bytes);
        free(tmp);
#if DBG
        printf("size = %ld (%ld)\n", decompressed_bytes, local_bytes); fflush(0);
#endif
        // Scatter the decompressed channel back into the blocks.
        #pragma omp parallel for
        for(int i=0; i<vInfo_local.size(); i++)
        {
            BlockInfo& info = vInfo_local[i];
            const int idx[3] = {info.index[0], info.index[1], info.index[2]};
            B & b = *(B*)info.ptrBlock;
            for(int ix=sX; ix<eX; ix++)
                for(int iy=sY; iy<eY; iy++)
                    for(int iz=sZ; iz<eZ; iz++)
                    {
                        const int gx = idx[0]*B::sizeX + ix;
                        const int gy = idx[1]*B::sizeY + iy;
                        const int gz = idx[2]*B::sizeZ + iz;
                        Real * const ptr_input = array_all + (gz + NZ * (gy + NY * gx));
                        TStreamer::operate(b, *ptr_input, ix, iy, iz, ichannel); // output -> point
                    }
        }
    } /* ichannel */
    status = fclose(file_id);
    delete [] array_all;
}
CUBISM_NAMESPACE_END
estimator.h
//
//  estimator.h
//
#ifndef estimator_h
#define estimator_h
#include<omp.h>
#include<mpi.h>
#include<math.h>
#include<fprop.h>
#include<bprop.h>

/* Trains the classifier for `epochs` epochs of mini-batch gradient descent:
 * per batch, forward-propagate through all layers, compute the cross-entropy
 * style log loss, then back-propagate.  Hybrid MPI + OpenMP; printing is
 * gated on MPI rank 2.  Returns the updated model.
 *
 * NOTE(review): MPI_Init is called here but MPI_Finalize never is, and the
 * malloc'd `input`/`y` buffers are never freed — confirm whether the caller
 * is expected to handle both.  `layers` is declared but unused. */
struct model DDClassifier(struct model model, int* Y, int num_samples, int batch_size, int epochs, float learning_rate, int verbose){
    struct layer* layers;
    float* model_input;
    float* y_hat;
    int num_layers = model.num_layers;
    int num_batches = num_samples/batch_size;  // remainder samples are dropped
    int num_features = model.num_features;
    float* input = malloc(num_features * batch_size * sizeof(float));
    int* y = malloc(batch_size*sizeof(int));
    MPI_Init(NULL, NULL);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    model_input = model.input;
    struct output output;
    /*Train model*/
    for (int i=0; i<epochs; i++){
        if (rank==2){
            printf("\n\nEpoch: %d/%d\n", i+1, epochs);
        }
        float epoch_loss = 0.0;
        /*Cannot be parallelized*/
        for (int batch=0; batch<num_batches; batch++){
            // Copy this batch's feature slice into `input`.
            // NOTE(review): both the destination stride (j*num_features + k)
            // and the source offset (batch*num_batches + ...) look suspect —
            // presumably they were meant to involve batch_size; verify
            // against the data layout used by FProp.  TODO confirm.
            #pragma omp parallel for collapse(2)
            for(int j=0; j<num_features; j++){
                for(int k=0; k<batch_size; k++){
                    input[j*num_features + k] = model_input[batch*num_batches + j*num_features + k];
                }
            }
            // Copy this batch's labels.
            #pragma omp parallel for
            for(int j=0; j<batch_size; j++){
                y[j] = Y[batch*batch_size + j];
            }
            MPI_Barrier(MPI_COMM_WORLD);
            if (rank==2 && verbose>0){
                printf("\n\nEpoch %d, Batch %d \n\nForward propagation. Layer %d\n", i+1, batch+1, 0+1);
            }
            // Forward pass: input layer first ...
            output = FProp(input, model.layers[0], batch_size, rank, verbose);
            model.layers[0].Z = output.Z;
            model.layers[0].A = output.A;
            MPI_Barrier(MPI_COMM_WORLD);
            /*Cannot be parallelized*/
            // ... then each hidden/output layer, fed by the previous activation.
            for (int j=1; j<num_layers; j++){
                if (rank==2 && verbose>0){
                    printf("\n\nEpoch %d, Batch %d \n\nForward propagation. Layer %d\n", i+1, batch+1, j+1);
                }
                output = FProp(model.layers[j-1].A, model.layers[j], batch_size, rank, verbose);
                model.layers[j].Z = output.Z;
                model.layers[j].A = output.A;
                MPI_Barrier(MPI_COMM_WORLD);
            }
            y_hat = model.layers[num_layers-1].A;
            int num_classes = model.layers[num_layers-1].num_nodes;
            if (rank==2){
                if (rank==2 && verbose>0){
                    printf("\nCalculating output..\n");
                }
                //printf("\n\nEpoch %d, Forward propagation. Output\n", i+1);
                for (int j=0; j<num_classes * batch_size;j++){
                    //printf("%f ", y_hat[j]);
                }
            }
            /*Loss*/
            if (rank==2 && verbose>0){
                printf("Calculating loss..\n");
            }
            // Binary-style log loss summed over every (class, sample) cell:
            // log(p) for the true class, log(1-p) otherwise.
            float loss = 0.0;
            for (int j=0; j<num_classes; j++){
                for (int k=0; k<batch_size; k++){
                    if(j==y[k]){
                        loss += log(y_hat[j*num_classes + k]);
                    }
                    else{
                        loss += log(1 - y_hat[j*num_classes + k]);
                    }
                }
            }
            loss /= -batch_size*num_classes;
            epoch_loss += loss;
            // Backward pass updates the weights in place.
            model = BProp(model, y_hat, y, batch_size, learning_rate, rank, verbose);
            MPI_Barrier(MPI_COMM_WORLD);
        }
        epoch_loss /= num_batches;
        if (rank==2){
            printf("\n\n\n\n\n\nloss: %f\n\n\n\n\n\n", epoch_loss);
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    //printf("%f Status: OK\n", input[9]);
    return model;
}
#endif /* estimator_h */
tool_not_available.c
// The OpenMP standard defines 3 ways of providing ompt_start_tool: // 1. "statically-linking the tool’s definition of ompt_start_tool into an OpenMP application" // RUN: %libomp-compile -DCODE -DTOOL && %libomp-run | FileCheck %s // Note: We should compile the tool without -fopenmp as other tools developer // would do. Otherwise this test may pass for the wrong reasons on Darwin. // RUN: %clang %flags -DTOOL -shared -fPIC %s -o %T/tool.so // 2. "introducing a dynamically-linked library that includes the tool’s definition of ompt_start_tool into the application’s address space" // 2.1 Link with tool during compilation // RUN: %libomp-compile -DCODE %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s // 2.2 Link with tool during compilation, but AFTER the runtime // RUN: %libomp-compile -DCODE -lomp %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s // 2.3 Inject tool via the dynamic loader // RUN: %libomp-compile -DCODE && %preload-tool %libomp-run | FileCheck %s // 3. "providing the name of a dynamically-linked library appropriate for the architecture and operating system used by the application in the tool-libraries-var ICV" // RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/tool.so %libomp-run | FileCheck %s // REQUIRES: ompt /* * This file contains code for an OMPT shared library tool to be * loaded and the code for the OpenMP executable. * -DTOOL enables the code for the tool during compilation * -DCODE enables the code for the executable during compilation */ #ifdef CODE #include "stdio.h" #include "omp.h" #include "ompt.h" int main() { #pragma omp parallel num_threads(2) { #pragma omp master { int result = omp_control_tool(omp_control_tool_start, 0, NULL); printf("0: control_tool()=%d\n", result); } } // Check if libomp supports the callbacks for this test. 
// CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: Do not initialize tool // CHECK: {{^}}0: control_tool()=-2 return 0; } #endif /* CODE */ #ifdef TOOL #include <ompt.h> #include "stdio.h" ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { printf("0: Do not initialize tool\n"); return NULL; } #endif /* TOOL */
GB_unop__identity_fc32_int32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_int32)
// op(A') function: GB (_unop_tran__identity_fc32_int32)
// C type: GxB_FC32_t
// A type: int32_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
    int32_t
#define GB_CTYPE \
    GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator: identity (the work is all in the cast)
#define GB_OP(z, x) \
    z = x ;
// casting: int32 -> single-complex with zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_int32)
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int32_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via this template include
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
reduction.c
#include <omp.h>
#include <stdio.h>

#define size 1000000

/* Static arrays and the reduction target; file-scope so they are not placed
 * on the stack (size is 10^6 doubles each). */
double a[size], b[size], result;

/* Demonstrates an OpenMP `reduction(+:result)` clause by computing the dot
 * product of a and b in parallel and printing it.
 * Fix: removed the dead local `chunk` (assigned 10, never used) and added an
 * explicit return. */
int main ()
{
  int i, n;

  /* Some initializations */
  n = size;
  result = 0.0;
  for (i=0; i < n; i++) {
    a[i] = i * 1.0;
    b[i] = i * 2.0;
  }

  /* Each thread accumulates a private partial sum; the runtime combines
   * them into `result` at the end of the region. */
  #pragma omp parallel for default(shared) reduction(+:result) private(i)
  for (i=0; i < n; i++)
    result = result + (a[i] * b[i]);

  printf("Final result= %f\n",result);
  return 0;
}
GB_unop__identity_uint64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_fp64) // op(A') function: GB (_unop_tran__identity_uint64_fp64) // C type: uint64_t // A type: double // cast: uint64_t cij = GB_cast_to_uint64_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_fp64) ( uint64_t *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, 
// A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
kvstore_dist_server.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file mxnet_node.h
 * \brief implement mxnet nodes
 */
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "ps/ps.h"
#include "mxnet/kvstore.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"

namespace mxnet {
namespace kvstore {

// Command codes exchanged between workers and the server.
static const int kRowSparsePushPull = 1;
static const int kDefaultPushPull = 0;
static const int kStopServer = -1;
static const int kSyncMode = -2;

/**
 * \brief executor runs a function using the thread called \ref Start
 */
class Executor {
 public:
  /**
   * \brief start the executor
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    while (true) {
      cond_.wait(lk, [this]{return !queue_.empty();});
      Block blk = std::move(queue_.front());
      queue_.pop();
      // Run outside the lock so producers can keep enqueueing.
      lk.unlock();
      if (blk.f) {
        blk.f(); blk.p->set_value();
      } else {
        // An empty function is the sentinel posted by Stop(): acknowledge
        // and leave the loop.
        blk.p->set_value(); break;
      }
      lk.lock();
    }
  }

  /**
   * \brief function
   */
  typedef std::function<void()> Func;

  /**
   * \brief let the thread called \ref Start to exec a function.
   * threadsafe
   */
  void Exec(const Func& func) {
    Block blk(func);
    auto fut = blk.p->get_future();
    {
      std::lock_guard<std::mutex> lk(mu_);
      queue_.push(std::move(blk));
      cond_.notify_one();
    }
    // Block the caller until the executor thread has run func.
    fut.wait();
  }

  /**
   * \brief stop the thread, threadsafe
   */
  void Stop() {
    Exec(Func());
  }

 private:
  // A queued unit of work plus the promise used to signal its completion.
  struct Block {
    explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
    Func f;
    std::shared_ptr<std::promise<void>> p;
  };
  std::queue<Block> queue_;
  std::mutex mu_;
  std::condition_variable cond_;
};

class KVStoreDistServer {
 public:
  KVStoreDistServer() {
    using namespace std::placeholders;
    ps_server_ = new ps::KVServer<float>(0);
    static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
        std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
    ps_server_->set_request_handle(
        std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
    sync_mode_ = false;
    log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
  }

  ~KVStoreDistServer() {
    delete ps_server_;
  }

  void set_controller(const KVStore::Controller& controller) {
    CHECK(controller);
    controller_ = controller;
  }

  void set_updater(const KVStore::Updater& updater)  {
    CHECK(updater);
    updater_ = updater;
  }

  /**
   * \brief blocked until received the command \a kSyncMode
   */
  void Run() {
    exec_.Start();
  }

 private:
  // Accumulates synced pushes for one key until all workers have reported.
  struct MergeBuf {
    std::vector<ps::KVMeta> request;
    NDArray array;
  };

  // Handles control commands; anything other than stop/sync is forwarded to
  // the user controller on the main thread (required for python callbacks).
  void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
    if (recved.head == kStopServer) {
      exec_.Stop();
    } else if (recved.head == kSyncMode) {
      sync_mode_ = true;
    } else {
      // let the main thread to execute ctrl, which is necessary for python
      exec_.Exec([this, recved]() {
          CHECK(controller_);
          controller_(recved.head, recved.body);
        });
    }
    app->Response(recved);
  }

  // Dispatches a data request to the dense or row-sparse handler by command.
  void DataHandleEx(const ps::KVMeta& req_meta,
                    const ps::KVPairs<real_t>& req_data,
                    ps::KVServer<real_t>* server) {
    if (req_meta.cmd == kRowSparsePushPull) {
      DataHandleRowSparse(req_meta, req_data, server);
    } else {
      DataHandleDefault(req_meta,
req_data, server); } return; } inline void ApplyUpdates(const int key, MergeBuf *merged, NDArray *stored, ps::KVServer<real_t>* server) { if (merged->request.size() == (size_t) ps::NumWorkers()) { // let the main thread to execute updater_, which is necessary for python if (updater_) { exec_.Exec([this, key, merged, stored](){ CHECK(updater_); updater_(key, merged->array, stored); }); } else { // if no updater, just copy CopyFromTo(merged->array, stored); } if (log_verbose_) { LOG(INFO) << "sync response to " << merged->request.size() << " workers"; } for (const auto& req : merged->request) { server->Response(req); } merged->request.clear(); stored->WaitToRead(); } else { merged->array.WaitToRead(); } } void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices, const int64_t master_key, const int64_t num_rows) { indices[0] = 0; for (int64_t i = 1; i <= num_rows; i++) { int key = DecodeKey(keys[i]); auto row_id = key - master_key; indices[i - 1] = row_id; } } void DataHandleRowSparse(const ps::KVMeta& req_meta, const ps::KVPairs<real_t>& req_data, ps::KVServer<real_t>* server) { int master_key = DecodeKey(req_data.keys[0]); auto num_rows = req_data.keys.size() - 1; auto& stored = store_[master_key]; if (req_meta.push) { CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty"; CHECK_EQ(req_data.lens[0], 0); real_t* data = req_data.vals.data(); if (stored.is_none()) { if (log_verbose_) LOG(INFO) << "initial push: " << master_key; // initialization CHECK_GT(num_rows, 0) << "init with empty data is not supported"; auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len); TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context()); Engine::Get()->PushAsync( [recved, stored](RunContext ctx, Engine::CallbackOnComplete on_complete) { 
NDArray rsp = stored; stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])}); mshadow::Stream<cpu> *s = ctx.get_stream<cpu>(); op::PopulateFullIdxRspImpl(s, &rsp); mshadow::Copy(rsp.data().FlatTo1D<cpu, float>(), recved.data().FlatTo1D<cpu, float>(), s); on_complete(); }, recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); stored.WaitToRead(); server->Response(req_meta); return; } // synced push if (sync_mode_) { if (log_verbose_) LOG(INFO) << "sync push: " << master_key << " " << req_data.keys; auto& merged = merge_buf_[master_key]; if (merged.array.is_none()) { merged.array = NDArray(kRowSparseStorage, stored.shape(), Context()); } if (num_rows == 0) { // reset to zeros if (merged.request.size() == 0) { merged.array = NDArray(kRowSparseStorage, stored.shape(), Context()); } else { // nothing to aggregate } merged.request.push_back(req_meta); ApplyUpdates(master_key, &merged, &stored, server); return; } auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); // data TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*) // row_sparse NDArray NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); if (merged.request.size() == 0) { CopyFromTo(recved, &merged.array, 0); } else { NDArray out(kRowSparseStorage, stored.shape(), Context()); std::vector<Engine::VarHandle> const_vars; const_vars.push_back(recved.var()); const_vars.push_back(merged.array.var()); // accumulate row_sparse gradients // TODO(haibin) override + operator for row_sparse NDArray // instead of calling BinaryComputeRspRsp directly using namespace mshadow; Engine::Get()->PushAsync( [recved, merged, out](RunContext ctx, Engine::CallbackOnComplete on_complete) 
{ op::ElemwiseBinaryOp::ComputeEx<cpu, mshadow::op::plus>( {}, {}, {recved, merged.array}, {kWriteTo}, {out}); on_complete(); }, recved.ctx(), const_vars, {out.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); CopyFromTo(out, &merged.array, 0); } merged.request.push_back(req_meta); ApplyUpdates(master_key, &merged, &stored, server); } else { // async push if (log_verbose_) LOG(INFO) << "async push: " << master_key; if (num_rows == 0) { server->Response(req_meta); return; } auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*) NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); exec_.Exec([this, master_key, &recved, &stored](){ CHECK(updater_); updater_(master_key, recved, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull if (log_verbose_) LOG(INFO) << "pull: " << master_key; ps::KVPairs<real_t> response; if (num_rows == 0) { std::vector<int> lens(req_data.keys.size(), 0); response.keys = req_data.keys; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); return; } CHECK(!stored.is_none()) << "init " << master_key << " first"; auto shape = stored.shape(); auto unit_len = shape.ProdShape(1, shape.ndim()); const float* data = stored.data().dptr<float>(); auto len = unit_len * num_rows; // concat values response.vals.resize(len); #pragma omp parallel for for (size_t i = 1; i <= num_rows; i++) { int key = DecodeKey(req_data.keys[i]); int64_t row_id = key - master_key; const auto src = data + row_id * unit_len; auto begin = (i - 1) * unit_len; auto end = i * unit_len; response.vals.segment(begin, end).CopyFrom(src, unit_len); } // setup 
response response.keys = req_data.keys; std::vector<int> lens(req_data.keys.size(), unit_len); lens[0] = 0; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); } } void DataHandleDefault(const ps::KVMeta& req_meta, const ps::KVPairs<real_t> &req_data, ps::KVServer<real_t>* server) { CHECK_EQ(req_meta.cmd, kDefaultPushPull); // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); auto& stored = store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished if (req_meta.push) { size_t ds[] = {(size_t)req_data.lens[0]}; TShape dshape(ds, ds + 1); TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*) dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); if (stored.is_none()) { // initialization stored = NDArray(dshape, Context()); CopyFromTo(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = merge_buf_[key]; if (merged.array.is_none()) { merged.array = NDArray(dshape, Context()); } if (merged.request.size() == 0) { CopyFromTo(recved, &merged.array, 0); } else { merged.array += recved; } merged.request.push_back(req_meta); ApplyUpdates(key, &merged, &stored, server); } else { // async push exec_.Exec([this, key, &recved, &stored](){ CHECK(updater_); updater_(key, recved, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull ps::KVPairs<real_t> response; CHECK(!stored.is_none()) << "init " << key << " first"; auto len = stored.shape().Size(); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom response.vals.CopyFrom(static_cast<const float*>(stored.data().dptr_), 
len); server->Response(req_meta, response); } } int DecodeKey(ps::Key key) { auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()]; return key - kr.begin(); } /** * \brief user defined */ bool sync_mode_; KVStore::Controller controller_; KVStore::Updater updater_; std::unordered_map<int, NDArray> store_; std::unordered_map<int, MergeBuf> merge_buf_; Executor exec_; ps::KVServer<float>* ps_server_; // whether to LOG verbose information bool log_verbose_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
trmv_x_bsr_n_lo_trans.c
#include "alphasparse/kernel.h"
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include<string.h>
#include "alphasparse/util.h"

/*
 * Triangular BSR matrix-vector multiply (per the file name
 * trmv_x_bsr_n_lo_trans: lower triangle, non-unit handling, transposed):
 *
 *     y := alpha * L^T * x + beta * y
 *
 * where L is the lower-triangular part (including the diagonal blocks'
 * lower triangle) of the square BSR matrix A.  Because the product is
 * transposed, a block at (block-row i, block-col c) contributes
 * values * x[rows of block-row i] into y[rows of block-col c]; that is why
 * the accumulation index below is derived from A->col_indx.
 *
 * Parallel scheme: block rows are partitioned across threads by nnz; each
 * thread accumulates into a private dense vector tmp[tid] of length
 * n_inner*bs, and the private vectors are reduced into y afterwards.
 *
 * Returns ALPHA_SPARSE_STATUS_INVALID_VALUE if A is not square.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y)
{
    ALPHA_INT bs = A->block_size;
    ALPHA_INT m_inner = A->rows;
    ALPHA_INT n_inner = A->cols;
    if(m_inner != n_inner)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();
    // NOTE(review): VLA sized by runtime thread count — assumes a modest
    // thread_num so the stack is not exhausted; confirm against project policy
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, m_inner, thread_num, partition);
    // NOTE(review): malloc results (here and per-thread below) are unchecked
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_m_s = partition[tid];
        const ALPHA_INT local_m_e = partition[tid + 1];
        // per-thread accumulator, zero-initialized
        tmp[tid] = (ALPHA_Number*)malloc(sizeof(ALPHA_Number)*n_inner*bs);
        memset(tmp[tid], 0, sizeof(ALPHA_Number)*n_inner*bs);
        if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
        {
            for (ALPHA_INT i = local_m_s; i < local_m_e; i++){
                ALPHA_INT col = i*bs;
                ALPHA_INT block_start = A->rows_start[i], block_end = A->rows_end[i];
                // first block with col_indx > i: restricts the scan to the
                // lower triangle (col_indx <= i), diagonal block included
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], i) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++){
                    ALPHA_INT row = A->col_indx[ai];
                    ALPHA_INT m_s = row*bs;
                    if (row == i){
                        // diagonal block: only its lower triangle takes part;
                        // for intra-block row r = s/bs, columns 0..r are used
                        for (int s = 0; s < bs*bs; s=s+bs){
                            for (int s1 = s; s1 <= s +s/bs; s1++){
                                alpha_madde(tmp[tid][m_s+s1-s], A->values[s1+ai*bs*bs], x[col+s/bs]);
                            }
                        }
                    }else {
                        // strictly-lower block: whole block participates
                        for (int s = 0; s < bs*bs; s=s+bs){
                            for (int s1 = s; s1 < s+bs; s1++){
                                alpha_madde(tmp[tid][m_s+s1-s], A->values[s1+ai*bs*bs], x[col+s/bs]);
                            }
                        }
                    }
                }
            }
        }else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR){
            for (ALPHA_INT i = local_m_s; i < local_m_e; i++){
                ALPHA_INT col = i*bs;
                ALPHA_INT block_start = A->rows_start[i], block_end = A->rows_end[i];
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], i) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++){
                    ALPHA_INT row = A->col_indx[ai];
                    ALPHA_INT m_s = row*bs;
                    if (row == i){
                        // diagonal block, column-major values: for intra-block
                        // column c = s/bs, only rows c..bs-1 (lower triangle)
                        for (int s = 0; s < bs*bs; s=s+bs){
                            for (int s1 = s + s/bs; s1 < s+bs; s1++){
                                alpha_madde(tmp[tid][m_s+s/bs], A->values[s1+ai*bs*bs], x[s1-s+col]);
                            }
                        }
                    }else {
                        // strictly-lower block: whole block participates
                        for (int s = 0; s < bs*bs; s=s+bs){
                            for (int s1 = s; s1 < s+bs; s1++){
                                alpha_madde(tmp[tid][m_s+s/bs], A->values[s1+ai*bs*bs], x[s1-s+col]);
                            }
                        }
                    }
                }
            }
        }
    }
    // reduction: y[i] = beta*y[i] + alpha * sum over threads of tmp[t][i]
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < n_inner*bs; ++i){
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for(ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
        }
        alpha_mul(y[i], y[i], beta);
        alpha_madde(y[i], tmp_y, alpha);
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < thread_num; ++i)
    {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
gaussian_elimination-con-main.c
#define TYPE unsigned long
#define N_BITS 64
#define MAX_TYPE 18446744073709551615UL
//#include "../include/gaussian_elimination.h"
#include <stdio.h>
#include <stdlib.h>
#include "matrix.h"
#include <omp.h>
#include "sieve.h"
#include "vector.h"
#include <gmp.h>

/* Functions implementing Gaussian elimination modulo 2 (quadratic-sieve
 * style factorization helper).  The bit matrix of the exponents modulo 2
 * is organized as follows:
 *
 *     N_BITS        N_BITS            N_BITS      X = possible padding
 * 1)  [000 ... 001] [000 ... 001] ... [000 ... 0XX]
 * 2)  [000 ... 000] [000 ... 010] ... [000 ... 0XX]
 *     ...                             ...
 * K)  [010 ... 101] [010 ... 101] ... [000 ... 0XX]
 */

/* Data type that determines the size of a bit block */
typedef TYPE word;

struct row_stats {
    // column index of the leading 1 bit in the row (first set bit when
    // scanning columns from 0); set to n_col when the row is all zero
    long unsigned b_dx;
    // number of bits set to 1 in the row
    long unsigned n_bit;
};

/****************/
/*******************************************************/
/* Print the N_BITS bits of a word, most-significant first.
 * NOTE(review): the print loop hard-codes 63 instead of N_BITS-1. */
void print_bits(word a)
{
    unsigned int bits[N_BITS];
    for(unsigned int i = 0; i < N_BITS; ++i)
        bits[i] = (a >> i) & 1U;
    for(int i = 63; i >= 0; --i)
        printf("%d", bits[i]);
}

/* Print the whole blocked bit matrix, one row per line. */
void print_all(unsigned long **M, int righe, int blocchi){
    for(int i = 0; i < righe; ++i) {
        for(int j = 0; j < blocchi; ++j) {
            print_bits(get_matrix_l(M, i, j));
            printf(" ");
        }
        printf("\n");
    }
}

/* Print an integer matrix. */
void print_M(unsigned int ** M, int r, int c)
{
    for(int i = 0; i < r; ++i) {
        for(int j = 0; j < c; ++j)
            printf("%u, ", get_matrix(M, i, j));
        printf("\n");
    }
}

/* Print an integer matrix with row indices. */
void print_M_con_i(unsigned int ** M, int r, int c)
{
    for(int i = 0; i < r; ++i) {
        printf("%d: ", i);
        for(int j = 0; j < c; ++j)
            printf("%u, ", get_matrix(M, i, j));
        printf("\n");
    }
}

/* Print an integer matrix reduced modulo 2. */
void print_M_2(unsigned int ** M, int r, int c)
{
    for(int i = 0; i < r; ++i) {
        for(int j = 0; j < c; ++j)
            printf("%u", get_matrix(M, i, j) % 2);
        printf("\n");
    }
}
/****************/

/* Computes: a = b * c mod n */
void modular_multiplication(mpz_t a, mpz_t b, mpz_t c, mpz_t n)
{
    mpz_mul (a, b, c);
    mpz_mod (a, a, n);
}

/* Returns the i-th bit of the k-th row of matrix M.
 * Bit i maps to block i / N_BITS, counted from the block's MSB side. */
unsigned get_k_i(word ** M, unsigned long k, unsigned long i)
{
    unsigned long I = i / N_BITS;
    unsigned long n_shift = N_BITS - ((i % N_BITS ) + 1);

    return (get_matrix_l(M,k,I) >> n_shift) & 1;
}

/* Sets (ORs in) the i-th bit of the k-th row of M to value mod 2.
 * NOTE(review): a 1 bit can only be set, never cleared, by this function. */
void set_k_i(word ** M, unsigned long k, unsigned long i, unsigned int value)
{
    unsigned long I = i / N_BITS;
    unsigned long n_shift = N_BITS - ((i % N_BITS ) + 1);
    word b = get_matrix_l(M, k, I);
    //printf("I=%lu, n_s=%lu, ", I, n_shift);
    //print_bits(((unsigned long) value % 2) << n_shift);
    //printf(" - ");
    b = b | (((unsigned long) value % 2UL) << n_shift);
    //printf("b=%lu\n", b);
    //print_bits(b);
    //printf("\n");
    set_matrix_l(M, k, I, b);
}

/* Adds the vectors modulo 2: v(k) = v(j) + v(k).  Uses bitwise XOR,
 * which is addition mod 2, applied block by block. */
void add_vector_z2(word ** M, unsigned long k, unsigned long j, unsigned long n_blocks)
{
    for(unsigned long I = 0; I < n_blocks; ++I) {
        word b = get_matrix_l(M, k, I) ^ get_matrix_l(M, j, I);
        set_matrix_l(M, k, I, b);
    }
}

/* Adds the vectors over Z: v(k) = v(j) + v(k). */
void add_vector_z(unsigned int ** M, unsigned long k, unsigned long j, unsigned long n_col)
{
    for(unsigned long i = 0; i < n_col; ++i) {
        unsigned int sum = get_matrix(M, k, i) + get_matrix(M, j, i);
        set_matrix(M, k, i, sum);
    }
}

/* Fills the row_stats structure with information about row k
 * (leading set bit and number of 1 bits). */
void get_wt_k(word ** M, unsigned long k, unsigned long n_col, struct row_stats * wt)
{
    // Initialize the leading-bit index one past the last column
    //wt->b_dx = n_blocks * N_BITS;
    wt->b_dx = n_col;
    wt->n_bit = 0;

    // Scan from column 0 until the first 1 bit is found.
    // NOTE(review): get_k_i(M, k, i) is evaluated before the i < n_col
    // bound check, so for an all-zero row one read past the last column
    // is attempted — confirm the matrix has padding to absorb it.
    unsigned long i = 0;
    while(get_k_i(M, k, i) == 0 && i < n_col) {
        //printf("%d", get_k_i(M, k, i));
        ++i;
    }
    //printf("\n %lu:%d", i, get_k_i(M, k, i));
    // printf("\n");
    /*
    for(int ii = 0; ii < n_col; ++ii)
        for(int kk=0; kk < 1; kk++)
            printf("%d", get_k_i(M, kk, ii));
    printf("\n");
    */
    // If we reached the end there are no 1 bits: leave b_dx = n_col
    if(i >= n_col)
        return;

    wt->b_dx = i;

    for(i = i; i < n_col; ++i)
        if(get_k_i(M, k, i))
            wt->n_bit++;
}

/* Performs the Gaussian elimination over GF(2).
 * For each column i, the first row j with leading bit in column i is the
 * pivot; every later row k with a 1 in column i gets v(k) += v(j) both
 * mod 2 and over Z, while Q(Ak) is multiplied by Q(Aj) mod N and the
 * row statistics are refreshed. */
void gaussian_elimination_mod_2(unsigned int ** M_z, word ** M_z2, mpz_t * Q_A, mpz_t N,
        unsigned long n_row, unsigned long n_col, unsigned long n_blocks, struct row_stats wt[])
{
    for(unsigned long i = 0; i < n_col; ++i) {
        unsigned long j;
        // advance j to the pivot row for column i (if any)
        for(j = 0; j < n_row && wt[j].b_dx != i; ++j)
            ;// just advance j

        for(unsigned k = j + 1; k < n_row; ++k) {
            if(get_k_i(M_z2, k, i)) { // bit v(k)(i) must be 1
                add_vector_z2(M_z2, k, j, n_blocks);        // v(k) = v(k) + v(j) mod 2
                add_vector_z(M_z, k, j, n_col);             // v(k) = v(k) + v(j)
                //gmp_printf("%Zd * %Zd = ", Q_A[k], Q_A[j]);
                modular_multiplication(Q_A[k], Q_A[k], Q_A[j], N); // Q(Ak) = Q(Ak) * Q(Aj)
                //gmp_printf("%Zd\n", Q_A[k]);
                get_wt_k(M_z2, k, n_col, & wt[k]);          // refresh wt
            }
        }
        //printf("\n");
        //print_all(M_z2, n_row, n_blocks);
        //printf("\n-----------\n");
    }
}

/* Returns whether row k of the blocked mod-2 matrix is all zero,
 * ignoring the padding bits of the last block.
 * NOTE(review): the print_bits/printf pair below looks like a debug
 * leftover; also, when n_col is a multiple of N_BITS the shift amount
 * equals N_BITS, which is undefined behaviour for a 64-bit word. */
int row_is_null(word ** M_z2, unsigned long k, unsigned long n_col, unsigned long n_blocks)
{
    for(unsigned long i = 0; i < n_blocks-1; ++i) {
        if(get_matrix_l(M_z2, k, i) != 0)
            return 0;
    }

    unsigned long n_shift = N_BITS - (n_col % N_BITS);
    /* 01010 ... 110XXX ... X   <- X is the padding
     *
     * 1111111111111111111111   <- this is MAX_TYPE (all 1s)
     * MAX_TYPE << n_shift =
     * 1111111111111000000000   <- ANDing masks out the padding */
    print_bits(MAX_TYPE << n_shift);
    printf("\n");
    word b = get_matrix_l(M_z2, k, n_blocks-1) & MAX_TYPE << n_shift;

    if(b != 0)
        return 0;

    return 1;
}

/* For every all-zero row (a linear dependency), builds the congruence
 * X^2 = Y^2 mod N and prints gcd(X + Y, N); a gcd strictly between 1
 * and N is a non-trivial factor of N.
 * NOTE(review): mpz_clears at the end is commented out, so the local
 * mpz variables leak on every call. */
void congruence_relation(mpz_t N,                   // number to factor
                         unsigned int * factor_base,
                         word ** M_z2,              // exponents mod 2
                         unsigned int ** M_z,       // integer exponents
                         mpz_t * Q_a,               // vector of Q(Ai)
                         struct row_stats * wt,     // zero counts per row
                         unsigned long n_row,
                         unsigned long n_primes)
{
    mpz_t mpz_temp; mpz_init(mpz_temp);
    mpz_t mpz_prime; mpz_init(mpz_prime);
    mpz_t X; mpz_init(X);
    mpz_t Y; mpz_init(Y);
    mpz_t m; mpz_init(m);
    mpz_t q; mpz_init(q);
    unsigned int exp;

    for(unsigned long i = 0; i < n_row; ++i)
        if(wt[i].n_bit == 0) { // dependency found
            mpz_set_ui(Y, 1);
            for(int j = 0; j < n_primes; ++j) {
                mpz_set_ui(mpz_prime, factor_base[j]);
                //gmp_printf("prime=%Zd\n", mpz_prime);
                exp = get_matrix(M_z, i, j) / 2;
                //printf("ok\n");
                // temp = (factor_base[j])^(M_z[i][j]) mod N
                mpz_powm_ui(mpz_temp, mpz_prime, exp, N);
                //gmp_printf("temp = %Zd = %Zd^%lu\n", mpz_temp, mpz_prime, exp);
                // Y = Y * temp mod N
                modular_multiplication(Y, Y, mpz_temp, N);
                //gmp_printf("Y = %Zd\n", mpz_temp);
            }
            mpz_set(X, Q_a[i]);
            //gmp_printf("(A+s) = %Zd\n", Q_a[i]);
            gmp_printf("mcd(%Zd + %Zd, %Zd) = ", X, Y, N);
            mpz_add(X, X, Y);     // X = X + Y
            mpz_gcd(m, X, N);     // m = gcd(X + Y, N)
            gmp_printf("%Zd", m);
            mpz_divexact(q, N, m); // q = N / m;
            //gmp_printf("%Zd * %Zd = %Zd, N = ", m, q, N);
            if(mpz_cmp(m, N) < 0 && mpz_cmp_ui(m, 1) > 0) { // non-trivial factorization
                gmp_printf(", N = %Zd * %Zd\n", m, q);
            }
            else
                printf("\n");
        }
    //mpz_clears(mpz_temp, mpz_prime, X, Y, m, q);
}

/*****************************************************/

/* Driver: sieve a fixed N = 8616460799 with a hard-coded 15-prime factor
 * base and precomputed sieve solutions, build the exponent matrices, run
 * the GF(2) elimination and report the discovered congruences plus timing. */
int main()
{
    word ** M;
    unsigned int ** M_z;
    unsigned long n_primes = 15;
    unsigned long n_blocchi = 1;//n_primes / N_BIT
    double t1, t2;
    unsigned int poly_val_num = 12800;

    mpz_t N; mpz_init(N); mpz_set_str(N, "8616460799", 10);
    unsigned int factor_base[15] = {2, 5, 7, 11, 17, 23, 37, 47, 59, 67, 71, 83, 89, 97, 101};
    // precomputed sieve roots (x_p, y_p) for each factor-base prime
    pair solutions[15];
    unsigned c = 0;
    solutions[c].sol1 = 1; solutions[c++].sol2 = 1;
    solutions[c].sol1 = 3; solutions[c++].sol2 = 4;
    solutions[c].sol1 = 6; solutions[c++].sol2 = 0;
    solutions[c].sol1 = 6; solutions[c++].sol2 = 4;
    solutions[c].sol1 = 15; solutions[c++].sol2 = 11;
    solutions[c].sol1 = 7; solutions[c++].sol2 = 1;
    solutions[c].sol1 = 21; solutions[c++].sol2 = 34;
    solutions[c].sol1 = 15; solutions[c++].sol2 = 34;
    solutions[c].sol1 = 9; solutions[c++].sol2 = 16;
    solutions[c].sol1 = 51; solutions[c++].sol2 = 25;
    solutions[c].sol1 = 69; solutions[c++].sol2 = 19;
    solutions[c].sol1 = 68; solutions[c++].sol2 = 38;
    solutions[c].sol1 = 8; solutions[c++].sol2 = 87;
    solutions[c].sol1 = 52; solutions[c++].sol2 = 55;
    solutions[c].sol1 = 34; solutions[c++].sol2 = 57;
    /*
    for(int k = 0; k < n_primes; ++k)
        printf("%d: xp=%d, yp=%d\n", factor_base[k], solutions[k].sol1, solutions[k].sol2);
    printf("\n");
    */
    t1 = omp_get_wtime();
    unsigned int ** exponents;
    init_matrix(& exponents, poly_val_num, n_primes);
    for(int i = 0; i < poly_val_num; ++i)
        for(int j = 0; j < n_primes; ++j)
            set_matrix(exponents, i, j, 0);
    print_M(exponents, poly_val_num, n_primes);
    mpz_t * Q_A;
    init_vector_mpz(& Q_A, poly_val_num);

    unsigned int n_fatt;
    n_fatt = sieve(N, factor_base, n_primes, solutions, exponents, Q_A, poly_val_num);

    //printf("\n");
    //printf("n_fatt:%d\n\n", n_fatt);
    //print_M_con_i(exponents, poly_val_num, n_primes);

    //init_matrix(& M_z, n_fatt, n_primes);
    init_matrix_l(& M, n_fatt, n_blocchi);
    //unsigned int f_c[] = {34, 453, 1134, 3143, 3388, 4514, 4808, 5251, 6033, 6263, 6683, 7508, 8494, 9086, 10233, 12379, 12799};
    //for(int i = 0; i < n_fatt; ++i)
    //    for(int j = 0; j < n_primes; ++j)
    //        set_matrix(M_z, i, j, get_matrix(exponents, f_c[i], j));
    //print_M(exponents, n_fatt, n_primes);
    /*
    for(int i = 0; i < n_fatt; ++i)
        for(int j = 0; j < n_primes; ++j)
            set_matrix(M_z, i, j, rand() % 10);
    */
    // clear the bit matrix, then load the exponents mod 2 into it
    for(int i = 0; i < n_fatt; ++i)
        for(int j = 0; j < n_primes; ++j) {
            set_k_i(M, i, j, 0);
        }
    for(int i = 0; i < n_fatt; ++i)
        for(int j = 0; j < n_primes; ++j) {
            unsigned int a = get_matrix(exponents, i, j);
            set_k_i(M, i, j, a);
        }
    //printf("\n");
    //print_all(M, n_fatt, n_blocchi);
    // NOTE(review): malloc result unchecked
    struct row_stats * wt = malloc(sizeof(struct row_stats) * n_fatt);
    // NOTE(review): omp_get_num_threads() outside a parallel region
    // returns 1; chunck is unused since the pragma below is commented out
    int n_threads = omp_get_num_threads();
    int chunck = n_fatt/n_threads;
    //#pragma omp parallel for schedule(dynamic, chunck)
    for(int i = 0; i < n_fatt; ++i)
        get_wt_k(M, i, n_primes, & wt[i]);
    t2 = omp_get_wtime();
    double t_set_up = t2 - t1;
    t1 = omp_get_wtime();
    gaussian_elimination_mod_2(exponents, M, Q_A, N, n_fatt, n_primes, n_blocchi, wt);
    t2 = omp_get_wtime();
    double t_gauss = t2 - t1;
    //printf("\n\ngauss:\n");
    //print_M(exponents, n_fatt, n_primes);
    //printf("\n");
    //print_all(M, n_fatt, n_blocchi);
    //printf("\n");
    mpz_t temp;
    mpz_init(temp);
    /*
    for(int i = 0; i < n_fatt; ++i) {
        //mpz_sqrt(temp, Q_A[i]);
        //gmp_printf ("%Zd\n", temp);
        gmp_printf ("%Zd\n", Q_A[i]);
    }
    */
    //printf("\n");
    congruence_relation(N, factor_base, M, exponents, Q_A, wt, n_fatt, n_primes);
    /*
    if(row_is_null(M, 16, n_primes, n_blocchi))
        printf("test1 ok\n");
    else
        printf("test1 errore\n");
    set_matrix_l(M, 16, 0, 1);
    print_bits(get_matrix_l(M, 16, 0));
    printf("\n");
    if(row_is_null(M, 16, n_primes, n_blocchi))
        printf("test2 ok\n");
    else
        printf("test2 errore\n");
    */
    printf("#time_gauss time_set_up time_totale\n");
    printf("%.6f ", t_gauss);
    printf("%.6f ", t_set_up);
    printf("%.6f\n", t_gauss + t_set_up);
}
kernel.openmp.h
#include <iris/iris_openmp.h>

/* Element-wise copy over the IRIS-assigned index range: dst[idx] = src[idx]. */
static void kernel0(float* dst, float* src, IRIS_OPENMP_KERNEL_ARGS) {
  int idx;
#pragma omp parallel for shared(dst, src) private(idx)
  IRIS_OPENMP_KERNEL_BEGIN(idx)
  dst[idx] = src[idx];
  IRIS_OPENMP_KERNEL_END
}

/* Element-wise accumulate over the IRIS-assigned index range: dst[idx] += src[idx]. */
static void kernel1(float* dst, float* src, IRIS_OPENMP_KERNEL_ARGS) {
  int idx;
#pragma omp parallel for shared(dst, src) private(idx)
  IRIS_OPENMP_KERNEL_BEGIN(idx)
  dst[idx] += src[idx];
  IRIS_OPENMP_KERNEL_END
}
GB_unaryop__abs_uint16_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint16_uint8
// op(A') function:  GB_tran__abs_uint16_uint8

// C type:   uint16_t
// A type:   uint8_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CASTING(z, x)   \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies uint8 -> uint16 cast (the "abs" of an unsigned value is the value
// itself) to each of the anz entries of Ax, in parallel.
GrB_Info GB_unop__abs_uint16_uint8
(
    uint16_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose driver: the actual loop lives in GB_unaryop_transpose.c, which
// expands the GB_* macros defined above for this type combination.
GrB_Info GB_tran__abs_uint16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
parallelReadTiff.c
#include "tiffio.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>   /* malloc/free were used without a prototype before */
#include "omp.h"
//mex -v COPTIMFLAGS="-O3 -fwrapv -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' '-L/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' -ltiff /clusterfs/fiona/matthewmueller/parallelTiffTesting/main.c

/* libtiff handler that silently discards all warnings/errors. */
void DummyHandler(const char* module, const char* fmt, va_list ap)
{
    // ignore errors and warnings
}

/* Allocate a scanline buffer of x elements sized by the TIFF
 * bits-per-sample value (8/16 -> integers, 32 -> float, 64 -> double).
 * Unknown depths fall back to float with a warning.
 * Returns NULL on allocation failure. */
void* mallocDynamic(uint64_t x, uint64_t bits){
    switch(bits){
        case 8:  return malloc(x*sizeof(uint8_t));
        case 16: return malloc(x*sizeof(uint16_t));
        case 32: return malloc(x*sizeof(float));
        case 64: return malloc(x*sizeof(double));
        default:
            printf("Image is not 8/16 bit, single, or double. Using single.\n");
            return malloc(x*sizeof(float));
    }
}

/* Read z directories of a multi-page TIFF into the caller-provided
 * buffer `tiff`, starting at directory `startSlice`.  Directories are
 * split into contiguous batches, one per OpenMP worker; each worker opens
 * its own TIFF handle (libtiff handles are not thread-safe to share).
 * Scanlines are transposed on the fly so the output is column-major
 * (MATLAB layout): out[(j*y)+i + slice*(x*y)] = in[i][j].
 *
 * x: image width, y: image height, z: number of slices to read,
 * bits: bits per sample (8/16/32/64). */
void readTiffParallel(uint64_t x, uint64_t y, uint64_t z, char* fileName, void* tiff, uint64_t bits, uint64_t startSlice){
    int32_t numWorkers = omp_get_max_threads();
    int32_t batchSize = (z-1)/numWorkers+1;
    int32_t w;
    #pragma omp parallel for
    for(w = 0; w < numWorkers; w++){
        TIFF* tif = TIFFOpen(fileName, "r");
        // bug fix: TIFFOpen can fail (e.g. too many handles); the old code
        // dereferenced NULL here
        if(!tif) continue;
        void* buffer = mallocDynamic(x, bits);
        if(!buffer){
            TIFFClose(tif);
            continue;
        }
        for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){
            if(dir>=z+startSlice) break;
            // libtiff directory indices are tdir_t (16-bit); the previous
            // uint64_t cast only hid the implicit narrowing
            TIFFSetDirectory(tif, (uint16_t)dir);
            for (int64_t i = 0; i < y; i++)
            {
                // load one scanline, then scatter it transposed into `tiff`
                switch(bits){
                    case 8:
                        TIFFReadScanline(tif, (uint8_t*)buffer, i, 0);
                        for(int64_t j = 0; j < x; j++){
                            ((uint8_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j];
                        }
                        break;
                    case 16:
                        TIFFReadScanline(tif, (uint16_t*)buffer, i, 0);
                        for(int64_t j = 0; j < x; j++){
                            ((uint16_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j];
                        }
                        break;
                    case 32:
                        TIFFReadScanline(tif, (float*)buffer, i, 0);
                        for(int64_t j = 0; j < x; j++){
                            ((float*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((float*)buffer)[j];
                        }
                        break;
                    case 64:
                        TIFFReadScanline(tif, (double*)buffer, i, 0);
                        for(int64_t j = 0; j < x; j++){
                            ((double*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((double*)buffer)[j];
                        }
                        break;
                }
            }
        }
        free(buffer);
        TIFFClose(tif);
    }
}

/* Open fileName, probe its width/height/slice-count/bit-depth, allocate
 * a matching MATLAB-ordered volume and fill it via readTiffParallel.
 * Returns the malloc'd volume (caller frees) or NULL on failure. */
void* readTiffParallelWrapper(char* fileName)
{
    TIFFSetWarningHandler(DummyHandler);
    TIFF* tif = TIFFOpen(fileName, "r");
    if(!tif) return NULL;

    uint64_t x = 1,y = 1,z = 1,bits = 1, startSlice = 0;
    TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &x);
    TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &y);

    // Count directories: exponential probe (t *= 8) to bracket the last
    // valid directory, then binary search between s and t.
    uint16_t s = 0, m = 0, t = 1;
    while(TIFFSetDirectory(tif,t)){
        s = t;
        t *= 8;
        if(s > t){
            // 16-bit wraparound: clamp to the maximum directory index
            t = 65535;
            printf("Number of slices > 32768");
            break;
        }
    }
    while(s != t){
        m = (s+t+1)/2;
        if(TIFFSetDirectory(tif,m)){
            s = m;
        }
        else{
            if(m > 0) t = m-1;
            else t = m;
        }
    }
    z = s+1;

    TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits);
    TIFFClose(tif);

    // bug fix: allocation results are now checked before use
    if(bits == 8){
        uint8_t* tiff = (uint8_t*)malloc(x*y*z*sizeof(uint8_t));
        if(!tiff) return NULL;
        readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice);
        return (void*)tiff;
    }
    else if(bits == 16){
        uint16_t* tiff = (uint16_t*)malloc(x*y*z*sizeof(uint16_t));
        if(!tiff) return NULL;
        readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice);
        return (void*)tiff;
    }
    else if(bits == 32){
        float* tiff = (float*)malloc(x*y*z*sizeof(float));
        if(!tiff) return NULL;
        readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice);
        return (void*)tiff;
    }
    else if(bits == 64){
        double* tiff = (double*)malloc(x*y*z*sizeof(double));
        if(!tiff) return NULL;
        readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice);
        return (void*)tiff;
    }
    else{
        return NULL;
    }
}
fill_int2c.c
/* Fill 2-center integral matrices: drive a per-shell-pair integral
 * callback `intor` over the shell ranges in shls_slice and scatter each
 * shell block into a dense F-ordered output matrix. Four variants cover
 * {real, spinor} x {1-electron, 2-electron} integrals. */
#include <stdlib.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "np_helper/np_helper.h"

// hermi flags: symmetry of the output matrix
#define PLAIN           0
#define HERMITIAN       1
#define ANTIHERMI       2
#define SYMMETRIC       3

// maximum di*dj shell-block extent for the per-thread scratch buffer
#define NCTRMAX         72

/* Copy a di x dj x comp shell block from the contiguous buffer `in` into
 * the F-ordered ni x nj x comp matrix `out` (real case). */
static void dcopy(double *out, double *in, int comp, int ni, int nj, int di, int dj)
{
        int i, j, ic;
        for (ic = 0; ic < comp; ic++) {
                for (j = 0; j < dj; j++) {
                for (i = 0; i < di; i++) {
                        out[j*ni+i] = in[j*di+i];
                } }
                out += ni * nj;
                in  += di * dj;
        }
}

/*
 * mat(naoi,naoj,comp) in F-order
 *
 * intor: callback computing one shell-pair block; for this 1e variant it
 *        is called WITHOUT the CINTOpt argument (opt is unused here).
 * hermi: one of PLAIN/HERMITIAN/ANTIHERMI/SYMMETRIC; when not PLAIN only
 *        the upper triangle is computed and NPdsymm_triu fills the rest.
 * shls_slice: [ish0, ish1, jsh0, jsh1] shell ranges; ao_loc maps shell
 *        index -> AO offset.
 */
void GTOint2c(int (*intor)(), double *mat, int comp, int hermi,
              int *shls_slice, int *ao_loc, CINTOpt *opt,
              int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        // NOTE(review): naoj is computed but unused in this variant
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
#pragma omp parallel default(none) \
        shared(intor, mat, comp, hermi, ao_loc, opt, atm, natm, bas, nbas, env)
{
        int ish, jsh, ij, di, dj, i0, j0;
        int shls[2];
        // per-thread scratch for one shell block (unchecked malloc)
        double *buf = malloc(sizeof(double)*NCTRMAX*NCTRMAX*comp);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                if (hermi != PLAIN && ish > jsh) {
                        // fill up only upper triangle of F-array
                        continue;
                }

                ish += ish0;
                jsh += jsh0;
                shls[0] = ish;
                shls[1] = jsh;
                (*intor)(buf, shls, atm, natm, bas, nbas, env);

                i0 = ao_loc[ish] - ao_loc[ish0];
                j0 = ao_loc[jsh] - ao_loc[jsh0];
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                dcopy(mat+j0*naoi+i0, buf, comp, naoi, naoj, di, dj);
        }
        free(buf);
}
        if (hermi != PLAIN) { // lower triangle of F-array
                int ic;
                for (ic = 0; ic < comp; ic++) {
                        NPdsymm_triu(naoi, mat+ic*naoi*naoi, hermi);
                }
        }
}

/* Copy a di x dj x comp shell block from `in` into the F-ordered
 * ni x nj x comp matrix `out` (complex/spinor case). */
static void zcopy(double complex *out, double complex *in,
                  int comp, int ni, int nj, int di, int dj)
{
        int i, j, ic;
        for (ic = 0; ic < comp; ic++) {
                for (j = 0; j < dj; j++) {
                for (i = 0; i < di; i++) {
                        out[j*ni+i] = in[j*di+i];
                } }
                out += ni * nj;
                in  += di * dj;
        }
}

/* Spinor (complex) version of GTOint2c; symmetry completion uses
 * NPzhermi_triu instead of NPdsymm_triu. */
void GTOint2c_spinor(int (*intor)(), double complex *mat, int comp, int hermi,
                     int *shls_slice, int *ao_loc, CINTOpt *opt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
#pragma omp parallel default(none) \
        shared(intor, mat, comp, hermi, ao_loc, opt, atm, natm, bas, nbas, env)
{
        int ish, jsh, ij, di, dj, i0, j0;
        int shls[2];
        double complex *buf = malloc(sizeof(double complex)*NCTRMAX*NCTRMAX*comp);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                if (hermi != PLAIN && ish > jsh) {
                        continue;
                }

                ish += ish0;
                jsh += jsh0;
                shls[0] = ish;
                shls[1] = jsh;
                (*intor)(buf, shls, atm, natm, bas, nbas, env);

                i0 = ao_loc[ish] - ao_loc[ish0];
                j0 = ao_loc[jsh] - ao_loc[jsh0];
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                zcopy(mat+j0*naoi+i0, buf, comp, naoi, naoj, di, dj);
        }
        free(buf);
}
        if (hermi != PLAIN) {
                int ic;
                for (ic = 0; ic < comp; ic++) {
                        NPzhermi_triu(naoi, mat+ic*naoi*naoi, hermi);
                }
        }
}

/* 2-electron, 2-center variant: identical to GTOint2c except the intor
 * callback additionally receives the CINTOpt optimizer `opt`. */
void GTOint2c2e(int (*intor)(), double *mat, int comp, int hermi,
                int *shls_slice, int *ao_loc, CINTOpt *opt,
                int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
#pragma omp parallel default(none) \
        shared(intor, mat, comp, hermi, ao_loc, opt, atm, natm, bas, nbas, env)
{
        int ish, jsh, ij, di, dj, i0, j0;
        int shls[2];
        double *buf = malloc(sizeof(double)*NCTRMAX*NCTRMAX*comp);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                if (hermi != PLAIN && ish > jsh) {
                        continue;
                }

                ish += ish0;
                jsh += jsh0;
                shls[0] = ish;
                shls[1] = jsh;
                (*intor)(buf, shls, atm, natm, bas, nbas, env, opt);

                i0 = ao_loc[ish] - ao_loc[ish0];
                j0 = ao_loc[jsh] - ao_loc[jsh0];
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                dcopy(mat+j0*naoi+i0, buf, comp, naoi, naoj, di, dj);
        }
        free(buf);
}
        if (hermi != PLAIN) {
                int ic;
                for (ic = 0; ic < comp; ic++) {
                        NPdsymm_triu(naoi, mat+ic*naoi*naoi, hermi);
                }
        }
}

/* Spinor (complex) 2-electron, 2-center variant: intor receives `opt`,
 * symmetry completion via NPzhermi_triu. */
void GTOint2c2e_spinor(int (*intor)(), double complex *mat, int comp, int hermi,
                       int *shls_slice, int *ao_loc, CINTOpt *opt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
#pragma omp parallel default(none) \
        shared(intor, mat, comp, hermi, ao_loc, opt, atm, natm, bas, nbas, env)
{
        int ish, jsh, ij, di, dj, i0, j0;
        int shls[2];
        double complex *buf = malloc(sizeof(double complex)*NCTRMAX*NCTRMAX*comp);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                if (hermi != PLAIN && ish > jsh) {
                        continue;
                }

                ish += ish0;
                jsh += jsh0;
                shls[0] = ish;
                shls[1] = jsh;
                (*intor)(buf, shls, atm, natm, bas, nbas, env, opt);

                i0 = ao_loc[ish] - ao_loc[ish0];
                j0 = ao_loc[jsh] - ao_loc[jsh0];
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                zcopy(mat+j0*naoi+i0, buf, comp, naoi, naoj, di, dj);
        }
        free(buf);
}
        if (hermi != PLAIN) {
                int ic;
                for (ic = 0; ic < comp; ic++) {
                        NPzhermi_triu(naoi, mat+ic*naoi*naoi, hermi);
                }
        }
}
/* ===== file: slansy.c ===== */
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlansy.c, normal z -> s, Fri Sep 28 17:38:07 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"

/***************************************************************************//**
 *
 * @ingroup plasma_lansy
 *
 *  Returns the norm of a symmetric matrix as
 *
 *     slansy = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
 *              (
 *              ( norm1(A),         NORM = PlasmaOneNorm
 *              (
 *              ( normI(A),         NORM = PlasmaInfNorm
 *              (
 *              ( normF(A),         NORM = PlasmaFrobeniusNorm
 *
 *  where norm1 denotes the one norm of a matrix (maximum column sum),
 *  normI denotes the infinity norm of a matrix (maximum row sum) and
 *  normF denotes the Frobenius norm of a matrix (square root of sum
 *  of squares). Note that max(abs(A(i,j))) is not a consistent matrix
 *  norm.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in,out] A
 *          On entry, the symmetric matrix A.
 *          If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
 *          contains the upper triangular part of the matrix A, and the strictly
 *          lower triangular part of A is not referenced.
 *          If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
 *          contains the lower triangular part of the matrix A, and the strictly
 *          upper triangular part of A is not referenced.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval float
 *         The specified norm of the symmetric matrix A.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_slansy
 * @sa plasma_clansy
 * @sa plasma_slansy
 * @sa plasma_slansy
 *
 ******************************************************************************/
float plasma_slansy(plasma_enum_t norm, plasma_enum_t uplo,
                    int n, float *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (n == 0)
        return 0.0;

    // Tune parameters
    if (plasma->tuning)
        plasma_tune_lansy(plasma, PlasmaRealFloat, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate workspace (size depends on the requested norm; `norm` was
    // validated above, so exactly one case fires).
    float *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (float*)malloc((size_t)A.mt*A.nt*sizeof(float));
        break;
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        work = (float*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(float));
        break;
    case PlasmaFrobeniusNorm:
        work = (float*)malloc((size_t)2*A.mt*A.nt*sizeof(float));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        // Fix: release the tile descriptor; the original leaked A here.
        plasma_desc_destroy(&A);
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    // Fix: the original stored these return values in `retval` but never
    // checked them; a failed init would be silently ignored.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        free(work);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        free(work);
        plasma_desc_destroy(&A);
        return retval;
    }

    float value;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_slansy(norm, uplo, A, work, &value, &sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Return the norm.
    return value;
}

/***************************************************************************//**
 *
 * @ingroup plasma_lansy
 *
 *  Calculates the max, one, infinity or Frobenius norm of a symmetric matrix.
 *  Non-blocking equivalent of plasma_slansy(). May return before the
 *  computation is finished. Operates on matrices stored by tiles. All matrices
 *  are passed through descriptors. All dimensions are taken from the
 *  descriptors. Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] A
 *          The descriptor of matrix A.
 *
 * @param[out] work
 *          Workspace of size:
 *          - PlasmaMaxNorm: A.mt*A.nt
 *          - PlasmaOneNorm: A.mt*A.n + A.n
 *          - PlasmaInfNorm: A.mt*A.n + A.n
 *          - PlasmaFrobeniusNorm: 2*A.mt*A.nt
 *
 * @param[out] value
 *          The calculated value of the norm requested.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_slansy
 * @sa plasma_omp_clansy
 * @sa plasma_omp_slansy
 * @sa plasma_omp_slansy
 *
 ******************************************************************************/
void plasma_omp_slansy(plasma_enum_t norm, plasma_enum_t uplo,
                       plasma_desc_t A, float *work, float *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pslansy(norm, uplo, A, work, value, sequence, request);
}
/* ===== file: otbSampleAugmentation.h ===== */
/*
 * Copyright (C) 2005-2020 Centre National d'Etudes Spatiales (CNES)
 *
 * This file is part of Orfeo Toolbox
 *
 *     https://www.orfeo-toolbox.org/
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef otbSampleAugmentation_h
#define otbSampleAugmentation_h

#ifdef _OPENMP
#include <omp.h>
#endif

#include <vector>
#include <algorithm>
#include <random>
#include <ctime>
#include <cassert>
#include <cmath> // fix: std::sqrt was used without including <cmath>

namespace otb
{

namespace sampleAugmentation
{
using SampleType       = std::vector<double>;
using SampleVectorType = std::vector<SampleType>;

/**
Estimate standard deviations of the components in one pass
using Welford's algorithm. Returns one std per component.
*/
SampleType EstimateStds(const SampleVectorType& samples)
{
  const auto nbSamples     = samples.size();
  const long nbComponents  = static_cast<long>(samples[0].size());
  SampleType stds(nbComponents, 0.0);  // accumulates M2 (sum of squared deviations)
  SampleType means(nbComponents, 0.0); // running means
  for (size_t i = 0; i < nbSamples; ++i)
  {
    auto norm_factor = 1.0 / (i + 1);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (long j = 0; j < nbComponents; ++j)
    {
      // Welford update: each thread touches a distinct component j,
      // so the parallel loop is race-free.
      const auto mu    = means[j];
      const auto x     = samples[i][j];
      auto       muNew = mu + (x - mu) * norm_factor;
      stds[j] += (x - mu) * (x - muNew);
      means[j] = muNew;
    }
  }
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long j = 0; j < nbComponents; ++j)
  {
    stds[j] = std::sqrt(stds[j] / nbSamples);
  }
  return stds;
}

/** Create new samples by replicating input samples. We loop through
* the input samples and add them to the new data set until nbSamples
* are added. The elements of newSamples are removed before proceeding.
*/
void ReplicateSamples(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples)
{
  newSamples.resize(nbSamples);
  const long long nbSamplesLL = static_cast<long long>(nbSamples);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long i = 0; i < nbSamplesLL; ++i)
  {
    // Fix: the original advanced a shared `imod` counter inside the
    // parallel loop (a data race producing wrong replication under
    // OpenMP). Indexing with i % size is race-free and yields the same
    // round-robin replication a serial run produced.
    newSamples[i] = inSamples[i % inSamples.size()];
  }
}

/** Create new samples by adding noise to existing samples. Gaussian
* noise is added to randomly selected samples. The standard deviation
* of the noise added to each component is the same as the one of the
* input variables divided by stdFactor (defaults to 10). The
* elements of newSamples are removed before proceeding.
*/
void JitterSamples(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples, float stdFactor = 10,
                   const int seed = std::time(nullptr))
{
  newSamples.resize(nbSamples);
  const long nbComponents = static_cast<long>(inSamples[0].size());
  // Fix: seed the engine with the `seed` parameter. The original seeded
  // from std::random_device, so the parameter had no effect on the noise
  // and results were not reproducible.
  std::mt19937 gen(seed);
  // The input samples are selected randomly with replacement
  std::srand(seed);
  // We use one gaussian distribution per component since they may
  // have different stds
  auto stds = EstimateStds(inSamples);
  std::vector<std::normal_distribution<double>> gaussDis(nbComponents);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long i = 0; i < nbComponents; ++i)
    gaussDis[i] = std::normal_distribution<double>{0.0, stds[i] / stdFactor};

  for (size_t i = 0; i < nbSamples; ++i)
  {
    newSamples[i] = inSamples[std::rand() % inSamples.size()];
    // Fix: no OpenMP here -- the original parallelized this loop while
    // every iteration mutated the shared engine `gen` (a data race).
    for (long j = 0; j < nbComponents; ++j)
      newSamples[i][j] += gaussDis[j](gen);
  }
}

struct NeighborType
{
  size_t index;    // position of the neighbor in the input sample vector
  double distance; // squared (normalized) distance to the query sample
};

struct NeighborSorter
{
  // Orders neighbors by increasing distance.
  constexpr bool operator()(const NeighborType& a, const NeighborType& b) const
  {
    return b.distance > a.distance;
  }
};

double ComputeSquareDistance(const SampleType& x, const SampleType& y)
{
  assert(x.size() == y.size());
  double dist{0};
  for (size_t i = 0; i < x.size(); ++i)
  {
    dist += (x[i] - y[i]) * (x[i] - y[i]);
  }
  return dist / (x.size() * x.size());
}

using NNIndicesType = std::vector<NeighborType>;
using NNVectorType  = std::vector<NNIndicesType>;
/** Returns the indices of the nearest neighbors for each input sample.
 *  NOTE(review): assumes nbNeighbors < inSamples.size(); otherwise the
 *  partial_sort range is out of bounds -- confirm with callers.
 */
void FindKNNIndices(const SampleVectorType& inSamples, const size_t nbNeighbors, NNVectorType& nnVector)
{
  const long long nbSamples = static_cast<long long>(inSamples.size());
  nnVector.resize(nbSamples);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long sampleIdx = 0; sampleIdx < nbSamples; ++sampleIdx)
  {
    NNIndicesType nns;
    for (long long neighborIdx = 0; neighborIdx < nbSamples; ++neighborIdx)
    {
      if (sampleIdx != neighborIdx)
        nns.push_back({static_cast<size_t>(neighborIdx), ComputeSquareDistance(inSamples[sampleIdx], inSamples[neighborIdx])});
    }
    std::partial_sort(nns.begin(), nns.begin() + nbNeighbors, nns.end(), NeighborSorter{});
    nns.resize(nbNeighbors);
    nnVector[sampleIdx] = std::move(nns);
  }
}

/** Generate the new sample in the line linking s1 and s2 */
SampleType SmoteCombine(const SampleType& s1, const SampleType& s2, double position)
{
  auto result = s1;
  for (size_t i = 0; i < s1.size(); ++i)
    result[i] = s1[i] + (s2[i] - s1[i]) * position;
  return result;
}

/** Create new samples using the SMOTE algorithm
Chawla, N. V., Bowyer, K. W., Hall, L. O., & Kegelmeyer, W. P.,
Smote: synthetic minority over-sampling technique, Journal of
artificial intelligence research, 16(), 321–357 (2002).
http://dx.doi.org/10.1613/jair.953
*/
void Smote(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples, const int nbNeighbors,
           const int seed = std::time(nullptr))
{
  newSamples.resize(nbSamples);
  const long long nbSamplesLL = static_cast<long long>(nbSamples);
  NNVectorType    nnVector;
  FindKNNIndices(inSamples, nbNeighbors, nnVector);
  // The input samples are selected randomly with replacement
  std::srand(seed);
  // Fix: std::rand() is not required to be thread-safe, and the original
  // called it inside the parallel loop. Draw all random numbers serially
  // (same per-iteration call order as a serial run), then combine in
  // parallel.
  std::vector<size_t> pickedSample(nbSamples);
  std::vector<size_t> pickedNeighbor(nbSamples);
  std::vector<double> positions(nbSamples);
  for (size_t i = 0; i < nbSamples; ++i)
  {
    pickedSample[i]   = std::rand() % (inSamples.size());
    pickedNeighbor[i] = std::rand() % nbNeighbors;
    positions[i]      = std::rand() / double{RAND_MAX};
  }
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long i = 0; i < nbSamplesLL; ++i)
  {
    const auto  sampleIdx   = pickedSample[i];
    const auto& sample      = inSamples[sampleIdx];
    const auto  neighborIdx = nnVector[sampleIdx][pickedNeighbor[i]].index;
    const auto& neighbor    = inSamples[neighborIdx];
    newSamples[i]           = SmoteCombine(sample, neighbor, positions[i]);
  }
}

} // end namespaces sampleAugmentation
} // end namespace otb

#endif
/* ===== file: vector.c ===== */
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Member functions for hypre_Vector class.
 *
 *****************************************************************************/

#include "_hypre_onedpl.hpp"
#include "seq_mv.h"
#include "_hypre_utilities.hpp"
//RL: TODO vector_device.c, include cuda there

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCreate
 *
 * Allocate a vector header on the host; data is NOT allocated here (see
 * hypre_SeqVectorInitialize).  The new vector owns its (future) data and
 * defaults to the handle's current memory location.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
   hypre_Vector *vector;

   vector = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);

   hypre_VectorData(vector) = NULL;
   hypre_VectorSize(vector) = size;

   hypre_VectorNumVectors(vector) = 1;
   hypre_VectorMultiVecStorageMethod(vector) = 0;

   /* set defaults */
   hypre_VectorOwnsData(vector) = 1;

   hypre_VectorMemoryLocation(vector) = hypre_HandleMemoryLocation(hypre_handle());

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultiVectorCreate
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   hypre_Vector *vector = hypre_SeqVectorCreate(size);
   hypre_VectorNumVectors(vector) = num_vectors;
   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorDestroy
 *
 * Frees the data array only when the vector owns it; the header is always
 * freed from host memory.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
   HYPRE_Int ierr = 0;

   if (vector)
   {
      HYPRE_MemoryLocation memory_location = hypre_VectorMemoryLocation(vector);

      if ( hypre_VectorOwnsData(vector) )
      {
         hypre_TFree(hypre_VectorData(vector), memory_location);
      }

      hypre_TFree(vector, HYPRE_MEMORY_HOST);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInitialize
 *
 * Allocates the data array (if not already set) in `memory_location` and
 * sets the multivector strides.  Storage method 0 = vectors contiguous
 * (column-major); 1 = components interleaved.  Returns nonzero on an
 * unknown storage method.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int  size = hypre_VectorSize(vector);
   HYPRE_Int  ierr = 0;
   HYPRE_Int  num_vectors = hypre_VectorNumVectors(vector);
   HYPRE_Int  multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector);

   hypre_VectorMemoryLocation(vector) = memory_location;

   /* Caveat: for pre-existing data, the memory location must be guaranteed
    * to be consistent with `memory_location'
    * Otherwise, mismatches will exist and problems will be encountered
    * when being used, and freed */
   if ( !hypre_VectorData(vector) )
   {
      hypre_VectorData(vector) = hypre_CTAlloc(HYPRE_Complex, num_vectors * size, memory_location);
   }

   if ( multivec_storage_method == 0 )
   {
      hypre_VectorVectorStride(vector) = size;
      hypre_VectorIndexStride(vector) = 1;
   }
   else if ( multivec_storage_method == 1 )
   {
      hypre_VectorVectorStride(vector) = 1;
      hypre_VectorIndexStride(vector) = num_vectors;
   }
   else
   {
      ++ierr;
   }

   return ierr;
}

HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
   HYPRE_Int ierr;

   ierr = hypre_SeqVectorInitialize_v2( vector, hypre_VectorMemoryLocation(vector) );

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetDataOwner
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector, HYPRE_Int owns_data )
{
   HYPRE_Int ierr = 0;

   hypre_VectorOwnsData(vector) = owns_data;

   return ierr;
}

/*--------------------------------------------------------------------------
 * ReadVector
 *
 * Reads "<size>\n<value per line>" from file into a new HOST vector.
 * NOTE(review): fopen result is not checked -- a missing file would crash
 * in hypre_fscanf.  Single-vector files only (asserted below).
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
   hypre_Vector  *vector;

   FILE          *fp;

   HYPRE_Complex *data;
   HYPRE_Int      size;

   HYPRE_Int      j;

   /*----------------------------------------------------------
    * Read in the data
    *----------------------------------------------------------*/

   fp = fopen(file_name, "r");

   hypre_fscanf(fp, "%d", &size);

   vector = hypre_SeqVectorCreate(size);

   hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST;

   hypre_SeqVectorInitialize(vector);

   data = hypre_VectorData(vector);
   for (j = 0; j < size; j++)
   {
      hypre_fscanf(fp, "%le", &data[j]);
   }

   fclose(fp);

   /* multivector code not written yet */
   hypre_assert( hypre_VectorNumVectors(vector) == 1 );

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorPrint
 *
 * Writes the vector (or multivector) to file in the format read by
 * hypre_SeqVectorRead.  Assumes data lives in host-accessible memory --
 * TODO confirm for device vectors.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
                      char         *file_name )
{
   FILE          *fp;

   HYPRE_Complex *data;
   HYPRE_Int      size, num_vectors, vecstride, idxstride;

   HYPRE_Int      i, j;
   HYPRE_Complex  value;

   HYPRE_Int      ierr = 0;

   num_vectors = hypre_VectorNumVectors(vector);
   vecstride   = hypre_VectorVectorStride(vector);
   idxstride   = hypre_VectorIndexStride(vector);

   /*----------------------------------------------------------
    * Print in the data
    *----------------------------------------------------------*/

   data = hypre_VectorData(vector);
   size = hypre_VectorSize(vector);

   fp = fopen(file_name, "w");

   if ( hypre_VectorNumVectors(vector) == 1 )
   {
      hypre_fprintf(fp, "%d\n", size);
   }
   else
   {
      hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
   }

   if ( num_vectors > 1 )
   {
      for ( j = 0; j < num_vectors; ++j )
      {
         hypre_fprintf(fp, "vector %d\n", j );
         for (i = 0; i < size; i++)
         {
            value = data[ j * vecstride + i * idxstride ];
#ifdef HYPRE_COMPLEX
            hypre_fprintf(fp, "%.14e , %.14e\n",
                          hypre_creal(value), hypre_cimag(value));
#else
            hypre_fprintf(fp, "%.14e\n", value);
#endif
         }
      }
   }
   else
   {
      for (i = 0; i < size; i++)
      {
#ifdef HYPRE_COMPLEX
         hypre_fprintf(fp, "%.14e , %.14e\n",
                       hypre_creal(data[i]), hypre_cimag(data[i]));
#else
         hypre_fprintf(fp, "%.14e\n", data[i]);
#endif
      }
   }

   fclose(fp);

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetConstantValues
 *
 * Fills all num_vectors*size entries with `value`, dispatching to the
 * device backend (thrust / oneDPL) or an (optionally OpenMP) host loop.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
                                  HYPRE_Complex value )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);
   HYPRE_Int      ierr        = 0;

   size *= hypre_VectorNumVectors(v);

   //hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (size > 0)
   {
      HYPRE_THRUST_CALL( fill_n, vector_data, size, value );
   }
#elif defined(HYPRE_USING_SYCL)
   if (size > 0)
   {
      HYPRE_ONEDPL_CALL( std::fill_n, vector_data, size, value );
   }
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      vector_data[i] = value;
   }
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetRandomValues
 *
 * returns vector of values randomly distributed between -1.0 and +1.0
 *
 * Values are always generated on the host with hypre_Rand (sequentially,
 * for reproducibility) and copied to the vector's memory location when it
 * is not host-resident.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
                                HYPRE_Int     seed )
{
   HYPRE_Complex *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);
   HYPRE_Int      i;
   HYPRE_Int      ierr  = 0;
   hypre_SeedRand(seed);

   size *= hypre_VectorNumVectors(v);

   if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) == hypre_MEMORY_HOST)
   {
      /* RDF: threading this loop may cause problems because of hypre_Rand() */
      for (i = 0; i < size; i++)
      {
         vector_data[i] = 2.0 * hypre_Rand() - 1.0;
      }
   }
   else
   {
      HYPRE_Complex *h_data = hypre_TAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
      for (i = 0; i < size; i++)
      {
         h_data[i] = 2.0 * hypre_Rand() - 1.0;
      }
      hypre_TMemcpy(vector_data, h_data, HYPRE_Complex, size,
                    hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST);
      hypre_TFree(h_data, HYPRE_MEMORY_HOST);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCopy
 * copies data from x to y
 * if size of x is larger than y only the first size_y elements of x are
 * copied to y
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
                     hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int ierr = 0;

   /* copy min(size_x, size_y) entries per vector; memcpy handles any
    * host/device combination of the two memory locations */
   size_t size = hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) ) * hypre_VectorNumVectors(x);

   hypre_TMemcpy( hypre_VectorData(y),
                  hypre_VectorData(x),
                  HYPRE_Complex,
                  size,
                  hypre_VectorMemoryLocation(y),
                  hypre_VectorMemoryLocation(x) );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneDeep
 * Returns a complete copy of x - a deep copy, with its own copy of the data.
 *--------------------------------------------------------------------------*/

hypre_Vector*
hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int      size        = hypre_VectorSize(x);
   HYPRE_Int      num_vectors = hypre_VectorNumVectors(x);

   hypre_Vector *y = hypre_SeqMultiVectorCreate( size, num_vectors );

   /* replicate layout before allocating, so the strides carry over */
   hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);

   hypre_SeqVectorInitialize_v2(y, memory_location);
   hypre_SeqVectorCopy( x, y );

   return y;
}

hypre_Vector*
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
   return hypre_SeqVectorCloneDeep_v2(x, hypre_VectorMemoryLocation(x));
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneShallow
 * Returns a complete copy of x - a shallow copy, pointing the data of x
 *
 * The clone does NOT own the data (owns_data = 0), so destroying it will
 * not free x's array.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
   HYPRE_Int      size        = hypre_VectorSize(x);
   HYPRE_Int      num_vectors = hypre_VectorNumVectors(x);

   hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );

   hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);

   hypre_VectorMemoryLocation(y) = hypre_VectorMemoryLocation(x);

   hypre_VectorData(y) = hypre_VectorData(x);
   hypre_SeqVectorSetDataOwner( y, 0 );
   /* data pointer is already set, so Initialize only fixes up strides */
   hypre_SeqVectorInitialize(y);

   return y;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorScale
 *
 * y := alpha * y, dispatching to cuBLAS/thrust, oneMKL/oneDPL, or an
 * (optionally OpenMP) host loop.  alpha == 1 is a no-op; alpha == 0
 * reduces to SetConstantValues.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorScale( HYPRE_Complex alpha,
                      hypre_Vector *y )
{
   /* special cases */
   if (alpha == 1.0)
   {
      return 0;
   }

   if (alpha == 0.0)
   {
      return hypre_SeqVectorSetConstantValues(y, 0.0);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(y);
   HYPRE_Int      ierr   = 0;

   size *= hypre_VectorNumVectors(y);

   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()),
                                  size, &alpha, y_data, 1) );
#else
   HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 );
#endif // #if defined(HYPRE_USING_CUBLAS)
#elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_ONEMKLBLAS)
   HYPRE_SYCL_CALL( oneapi::mkl::blas::scal(*hypre_HandleComputeStream(hypre_handle()),
                                            size, alpha, y_data, 1).wait() );
#else
   HYPRE_ONEDPL_CALL( std::transform, y_data, y_data + size, y_data,
                      [alpha](HYPRE_Complex y) -> HYPRE_Complex { return alpha * y; } );
#endif // #if defined(HYPRE_USING_ONEMKL)
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] *= alpha;
   }
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorAxpy
 *--------------------------------------------------------------------------*/

/* y := alpha * x + y, dispatching to cuBLAS/thrust, oneMKL/oneDPL, or an
 * (optionally OpenMP) host loop.  Assumes x and y share a size and memory
 * location. */
HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
                     hypre_Vector *x,
                     hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr   = 0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()),
                                  size, &alpha, x_data, 1, y_data, 1) );
#else
   HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif // #if defined(HYPRE_USING_CUBLAS)
#elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_ONEMKLBLAS)
   HYPRE_SYCL_CALL( oneapi::mkl::blas::axpy(*hypre_HandleComputeStream(hypre_handle()),
                                            size, alpha, x_data, 1, y_data, 1).wait() );
#else
   HYPRE_ONEDPL_CALL( std::transform, x_data, x_data + size, y_data, y_data,
                      [alpha](HYPRE_Complex x, HYPRE_Complex y) -> HYPRE_Complex { return alpha * x + y; } );
#endif // #if defined(HYPRE_USING_ONEMKL)
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] += alpha * x_data[i];
   }
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/* y = y + x ./ b */
HYPRE_Int
hypre_SeqVectorElmdivpy( hypre_Vector *x,
                         hypre_Vector *b,
                         hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *b_data = hypre_VectorData(b);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(b);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
   if (exec == HYPRE_EXEC_DEVICE)
   {
      //TODO
      //hypre_SeqVectorElmdivpyDevice(x, b, y);
      /*
      #if defined(HYPRE_USING_DEVICE_OPENMP)
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms)
      #endif
      */
      hypreDevice_IVAXPY(size, b_data, x_data, y_data);
   }
   else
#endif
   {
      HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      /* NOTE(review): no guard against b_data[i] == 0 -- division by zero
       * is the caller's responsibility */
      for (i = 0; i < size; i++)
      {
         y_data[i] += x_data[i] / b_data[i];
      }
   }

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/* y[i] += x[i] / b[i] where marker[i] == marker_val */
HYPRE_Int
hypre_SeqVectorElmdivpyMarked( hypre_Vector *x,
                               hypre_Vector *b,
                               hypre_Vector *y,
                               HYPRE_Int    *marker,
                               HYPRE_Int     marker_val)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *b_data = hypre_VectorData(b);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(b);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x),
                                                      hypre_VectorMemoryLocation(b) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypreDevice_IVAXPYMarked(size, b_data, x_data, y_data, marker, marker_val);
   }
   else
#endif
   {
      HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         if (marker[i] == marker_val)
         {
            y_data[i] += x_data[i] / b_data[i];
         }
      }
   }

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInnerProd
 *--------------------------------------------------------------------------*/

HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
                          hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Real     result = 0.0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()),
                                 size, x_data, 1, y_data, 1, &result) );
#else
   result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif // #if defined(HYPRE_USING_CUBLAS)
#elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_ONEMKLBLAS)
   HYPRE_Real *result_dev = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_DEVICE);
   HYPRE_SYCL_CALL(
oneapi::mkl::blas::dot(*hypre_HandleComputeStream(hypre_handle()), size, x_data, 1, y_data, 1, result_dev).wait() ); hypre_TMemcpy(&result, result_dev, HYPRE_Real, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TFree(result_dev, HYPRE_MEMORY_DEVICE); #else result = HYPRE_ONEDPL_CALL( std::transform_reduce, x_data, x_data + size, y_data, 0.0 ); #endif // #if defined(HYPRE_USING_ONEMKLBLAS) #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #else // #ifndef HYPRE_COMPLEX /* TODO */ #error "Complex inner product" #endif // #ifndef HYPRE_COMPLEX #else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { result += hypre_conj(y_data[i]) * x_data[i]; } #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return result; } //TODO /*-------------------------------------------------------------------------- * hypre_VectorSumElts: * Returns the sum of all vector elements. 
*--------------------------------------------------------------------------*/ HYPRE_Complex hypre_SeqVectorSumElts( hypre_Vector *vector ) { HYPRE_Complex sum = 0; HYPRE_Complex *data = hypre_VectorData( vector ); HYPRE_Int size = hypre_VectorSize( vector ); HYPRE_Int i; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE #endif for ( i = 0; i < size; ++i ) { sum += data[i]; } return sum; } HYPRE_Int hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location) { HYPRE_Int ierr = 0; #ifdef HYPRE_USING_UNIFIED_MEMORY if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE) { /* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified momory\n");*/ return 1; } HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Int size = hypre_VectorSize(x) * hypre_VectorNumVectors(x); if (size == 0) { return ierr; } hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location); #endif return ierr; } #if 0 /* y[i] = max(alpha*x[i], beta*y[i]) */ HYPRE_Int hypre_SeqVectorMax( HYPRE_Complex alpha, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int size = hypre_VectorSize(x); HYPRE_Int ierr = 0; size *= hypre_VectorNumVectors(x); //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE); //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE); thrust::maximum<HYPRE_Complex> mx; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_THRUST_CALL( transform, thrust::make_transform_iterator(x_data, alpha * _1), thrust::make_transform_iterator(x_data + size, alpha * _1), thrust::make_transform_iterator(y_data, beta * _1), y_data, mx ); #else HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data) #elif 
defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { y_data[i] += hypre_max(alpha * x_data[i], beta * y_data[i]); } #endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */ hypre_SyncComputeStream(hypre_handle()); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } #endif
omp_for_schedule_auto.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <stdlib.h> #include <math.h> #include "omp_testsuite.h" int sum1; #pragma omp threadprivate(sum1) int test_omp_for_auto() { int j; int sum; int sum0; int known_sum; int threadsnum; sum = 0; sum0 = 12345; // array which keeps track of which threads participated in the for loop // e.g., given 4 threads, [ 0 | 1 | 1 | 0 ] implies // threads 0 and 3 did not, threads 1 and 2 did int max_threads = omp_get_max_threads(); int* active_threads = (int*)malloc(sizeof(int)*max_threads); for(j = 0; j < max_threads; j++) active_threads[j] = 0; #pragma omp parallel { int i; sum1 = 0; #pragma omp for firstprivate(sum0) schedule(auto) for (i = 1; i <= LOOPCOUNT; i++) { active_threads[omp_get_thread_num()] = 1; sum0 = sum0 + i; sum1 = sum0; } #pragma omp critical { sum = sum + sum1; } } // count the threads that participated (sum is stored in threadsnum) threadsnum=0; for(j = 0; j < max_threads; j++) { if(active_threads[j]) threadsnum++; } free(active_threads); known_sum = 12345 * threadsnum + (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return (known_sum == sum); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_auto()) { num_failed++; } } return num_failed; }
cones.c
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2017 Pantelis Sopasakis (https://alphaville.github.io),
 * Krina Menounou (https://www.linkedin.com/in/krinamenounou),
 * Panagiotis Patrinos (http://homes.esat.kuleuven.be/~ppatrino)
 * Copyright (c) 2012 Brendan O'Donoghue (bodonoghue85@gmail.com)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include "scs.h"
#include "scs_blas.h" /* contains BLAS(X) macros and type info */

/* tolerances / iteration caps for the iterative cone projections below */
#define CONE_RATE (2)
#define CONE_TOL (1e-8)
#define CONE_THRESH (1e-6)
#define EXP_CONE_MAX_ITERS (100)
#define POW_CONE_MAX_ITERS (20)

#ifdef LAPACK_LIB_FOUND
/* LAPACK/BLAS routines used by the semidefinite-cone projection */
extern void BLAS(syevr)(const char *jobz, const char *range, const char *uplo,
                        blasint *n, scs_float *a, blasint *lda, scs_float *vl,
                        scs_float *vu, blasint *il, blasint *iu,
                        scs_float *abstol, blasint *m, scs_float *w,
                        scs_float *z, blasint *ldz, blasint *isuppz,
                        scs_float *work, blasint *lwork, blasint *iwork,
                        blasint *liwork, blasint *info);
extern void BLAS(syr)(const char *uplo, const blasint *n,
                      const scs_float *alpha, const scs_float *x,
                      const blasint *incx, scs_float *a, const blasint *lda);
extern void BLAS(scal)(const blasint *n, const scs_float *sa, scs_float *sx,
                       const blasint *incx);
extern scs_float BLAS(nrm2)(const blasint *n, scs_float *x,
                            const blasint *incx);
#endif

/* number of scalars needed to store the (packed lower-triangular) s x s
 * semidefinite cone */
static scs_int getSdConeSize(scs_int s) {
    return (s * (s + 1)) / 2;
}

/*
 * boundaries will contain array of indices of rows of A corresponding to
 * cone boundaries, boundaries[0] is starting index for cones of size strictly
 * larger than 1
 * returns length of boundaries array, boundaries malloc-ed here so should be
 * freed
 */
scs_int scs_get_cone_boundaries(const ScsCone * RESTRICT k,
                                scs_int * * RESTRICT boundaries) {
    scs_int i, count = 0;
    scs_int len = 1 + k->qsize + k->ssize + k->ed + k->ep + k->psize;
    /* NOTE(review): scs_malloc result is not checked before use */
    scs_int *RESTRICT b = scs_malloc(sizeof (scs_int) * len);
    /* zero/linear cones are merged into a single leading entry */
    b[count] = k->f + k->l;
    count += 1;
    if (k->qsize > 0) {
        memcpy(&b[count], k->q, k->qsize * sizeof (scs_int));
    }
    count += k->qsize;
    for (i = 0; i < k->ssize; ++i) {
        b[count + i] = getSdConeSize(k->s[i]);
    }
    count += k->ssize;
    /* each exponential (primal or dual) cone occupies 3 rows */
    for (i = 0; i < k->ep + k->ed; ++i) {
        b[count + i] = 3;
    }
    count += k->ep + k->ed;
    /* each power cone also occupies 3 rows */
    for (i = 0; i < k->psize; ++i) {
        b[count + i] = 3;
    }
    /* count += k->psize; */
    *boundaries = b;
    return len;
}

/* total number of rows of A implied by the cone description k */
static scs_int getFullConeDims(const ScsCone *RESTRICT k) {
    scs_int i, c = 0;
    if (k->f)
        c += k->f;
    if (k->l)
        c += k->l;
    if (k->qsize && k->q) {
        for (i = 0; i < k->qsize; ++i) {
            c += k->q[i];
        }
    }
    if (k->ssize && k->s) {
        for (i = 0; i < k->ssize; ++i) {
            c += getSdConeSize(k->s[i]);
        }
    }
    if (k->ed)
        c += 3 * k->ed;
    if (k->ep)
        c += 3 * k->ep;
    /* NOTE(review): guarded on k->p (the exponent array) rather than
       k->psize like the branches above — confirm this asymmetry is
       intentional */
    if (k->p)
        c += 3 * k->psize;
    return c;
}

/* sanity-checks the cone description against the data dimensions;
 * returns 0 if valid, -1 (after printing a message) otherwise */
scs_int scs_validate_cones(const ScsData * RESTRICT d,
                           const ScsCone * RESTRICT k) {
    scs_int i;
    if (getFullConeDims(k) != d->m) {
        scs_printf("cone dimensions %li not equal to num rows in A = m = %li\n",
                   (long) getFullConeDims(k), (long) d->m);
        return -1;
    }
    if (k->f && k->f < 0) {
        scs_printf("free cone error\n");
        return -1;
    }
    if (k->l && k->l < 0) {
        scs_printf("lp cone error\n");
        return -1;
    }
    if (k->qsize && k->q) {
        if (k->qsize < 0) {
            scs_printf("soc cone error\n");
            return -1;
        }
        for (i = 0; i < k->qsize; ++i) {
            if (k->q[i] < 0) {
                scs_printf("soc cone error\n");
                return -1;
            }
        }
    }
    if (k->ssize && k->s) {
        if (k->ssize < 0) {
            scs_printf("sd cone error\n");
            return -1;
        }
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] < 0) {
                scs_printf("sd cone error\n");
                return -1;
            }
        }
    }
    if (k->ed && k->ed < 0) {
        scs_printf("ep cone error\n");
        return -1;
    }
    if (k->ep && k->ep < 0) {
        scs_printf("ed cone error\n");
        return -1;
    }
    if (k->psize && k->p) {
        if (k->psize < 0) {
            scs_printf("power cone error\n");
            return -1;
        }
        /* power-cone exponents must lie in [-1, 1]; sign selects
           primal (positive) vs dual (negative) cone */
        for (i = 0; i < k->psize; ++i) {
            if (k->p[i] < -1 || k->p[i] > 1) {
                scs_printf("power cone error, values must be in [-1,1]\n");
                return -1;
            }
        }
    }
    return 0;
}

/* returns a malloc-ed one-line summary string (caller frees) and resets the
 * accumulated cone-projection timer */
char *scs_get_cone_summary(const ScsInfo * RESTRICT info,
                           ScsConeWork * RESTRICT c) {
    char *str = scs_malloc(sizeof (char) * 64);
    sprintf(str, "\tCones: avg projection time: %1.2es\n",
            c->total_cone_time / (info->iter + 1) / 1e3);
    c->total_cone_time = 0.0;
    return str;
}

/* frees the cone workspace (LAPACK scratch buffers when present) */
void scs_finish_cone(ScsConeWork * RESTRICT c) {
#ifdef LAPACK_LIB_FOUND
    scs_free(c->Xs);
    scs_free(c->Z);
    scs_free(c->e);
    scs_free(c->work);
    scs_free(c->iwork);
#endif
    scs_free(c);
}

/* returns a malloc-ed multi-line description of the cone sizes
 * (caller frees) */
char *scs_get_cone_header(const ScsCone * RESTRICT k) {
    char *tmp = scs_malloc(sizeof (char) * 512);
    scs_int i, socVars, socBlks, sdVars, sdBlks;
    sprintf(tmp, "Cones:");
    if (k->f) {
        sprintf(tmp + strlen(tmp), "\tprimal zero / dual free vars: %li\n",
                (long) k->f);
    }
    if (k->l) {
        sprintf(tmp + strlen(tmp), "\tlinear vars: %li\n", (long) k->l);
    }
    socVars = 0;
    socBlks = 0;
    if (k->qsize && k->q) {
        socBlks = k->qsize;
        for (i = 0; i < k->qsize; i++) {
            socVars += k->q[i];
        }
        sprintf(tmp + strlen(tmp), "\tsoc vars: %li, soc blks: %li\n",
                (long) socVars, (long) socBlks);
    }
    sdVars = 0;
    sdBlks = 0;
    if (k->ssize && k->s) {
        sdBlks = k->ssize;
        for (i = 0; i < k->ssize; i++) {
            sdVars += getSdConeSize(k->s[i]);
        }
        sprintf(tmp + strlen(tmp), "\tsd vars: %li, sd blks: %li\n",
                (long) sdVars, (long) sdBlks);
    }
    if (k->ep || k->ed) {
        sprintf(tmp + strlen(tmp), "\texp vars: %li, dual exp vars: %li\n",
                (long) 3 * k->ep, (long) 3 * k->ed);
    }
    if (k->psize && k->p) {
        sprintf(tmp + strlen(tmp), "\tprimal + dual power vars: %li\n",
                (long) 3 * k->psize);
    }
    return tmp;
}

/* true iff every SD cone is at most 2x2 (those have closed-form
 * projections, so no LAPACK workspace is needed) */
static scs_int isSimpleSemiDefiniteCone(scs_int * RESTRICT s, scs_int ssize) {
    scs_int i;
    for (i = 0; i < ssize; i++) {
        if (s[i] > 2) {
            return 0; /* false */
        }
    }
    return 1; /* true */
}

/* Newton iteration for the 1-D root-find inside the exponential-cone
 * projection; returns the t-offset solution (see expSolveForXWithRho) */
static scs_float expNewtonOneD(scs_float rho, scs_float y_hat,
                               scs_float z_hat) {
    scs_float t = MAX(-z_hat, 1e-6);
    scs_float f, fp;
    scs_int i;
    for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
        f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1;
        fp = (2 * t + z_hat) / rho / rho + 1 / t;
        t = t - f / fp;
        if (t <= -z_hat) {
            return 0;
        } else if (t <= 0) {
            return z_hat;
        } else if (ABS(f) < CONE_TOL) {
            break;
        }
    }
    return t + z_hat;
}

/* given dual variable rho, recovers the candidate projection x of v */
static void expSolveForXWithRho(scs_float * RESTRICT v, scs_float * RESTRICT x,
                                scs_float rho) {
    x[2] = expNewtonOneD(rho, v[1], v[2]);
    x[1] = (x[2] - v[2]) * x[2] / rho;
    x[0] = v[0] - rho;
}

/* gradient (wrt rho) of the dual function used by the bisection in
 * projExpCone; also fills x with the candidate projection */
static scs_float expCalcGrad(scs_float * RESTRICT v, scs_float * RESTRICT x,
                             scs_float rho) {
    expSolveForXWithRho(v, x, rho);
    if (x[1] <= 1e-12) {
        return x[0];
    }
    return x[0] + x[1] * log(x[1] / x[2]);
}

/* doubles ub until the gradient changes sign, bracketing the root in
 * [lb, ub] for the bisection */
static void expGetRhoUb(scs_float * RESTRICT v, scs_float * RESTRICT x,
                        scs_float * RESTRICT ub, scs_float *lb) {
    *lb = 0;
    *ub = 0.125;
    while (expCalcGrad(v, x, *ub) > 0) {
        *lb = *ub;
        (*ub) *= 2;
    }
}

/* project onto the exponential cone, v has dimension *exactly* 3
 * (iter is currently unused; see the commented-out adaptive tolerance) */
static scs_int projExpCone(scs_float * RESTRICT v, scs_int iter) {
    scs_int i;
    scs_float ub, lb, rho, g, x[3];
    scs_float r = v[0], s = v[1], t = v[2];
    scs_float tol = CONE_TOL;
    /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 / POWF((iter + 1), CONE_RATE)); */
    /* v in cl(Kexp) */
    if ((s > 0 && s * exp(r / s) - t <= CONE_THRESH) ||
        (r <= 0 && s == 0 && t >= 0)) {
        return 0;
    }
    /* -v in Kexp^* */
    if ((-r < 0 && r * exp(s / r) + exp(1) * t <= CONE_THRESH) ||
        (-r == 0 && -s >= 0 && -t >= 0)) {
        memset(v, 0, 3 * sizeof (scs_float));
        return 0;
    }
    /* special case with analytical solution */
    if (r < 0 && s < 0) {
        v[1] = 0.0;
        v[2] = MAX(v[2], 0);
        return 0;
    }
    /* iterative procedure to find projection, bisects on dual variable: */
    expGetRhoUb(v, x, &ub, &lb); /* get starting upper and lower bounds */
    for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
        rho = (ub + lb) / 2; /* halfway between upper and lower bounds */
        g = expCalcGrad(v, x, rho); /* calculates gradient wrt dual var */
        if (g > 0) {
            lb = rho;
        } else {
            ub = rho;
        }
        if (ub - lb < tol) {
            break;
        }
    }
    v[0] = x[0];
    v[1] = x[1];
    v[2] = x[2];
    return 0;
}

/* allocates the LAPACK workspace (sized for the largest SD cone) via a
 * syevr workspace query; returns 0 on success, -1 on failure */
static scs_int setUpSdScsConeWorkSpace(ScsConeWork * RESTRICT c,
                                       const ScsCone * RESTRICT k) {
#ifdef LAPACK_LIB_FOUND
    scs_int i;
    blasint nMax = 0;
    scs_float eigTol = 1e-8;
    blasint negOne = -1;
    blasint m = 0;
    blasint info;
    scs_float wkopt;
    /* eigenvector decomp workspace */
    for (i = 0; i < k->ssize; ++i) {
        if (k->s[i] > nMax) {
            nMax = (blasint) k->s[i];
        }
    }
    c->Xs = scs_calloc(nMax * nMax, sizeof (scs_float));
    c->Z = scs_calloc(nMax * nMax, sizeof (scs_float));
    c->e = scs_calloc(nMax, sizeof (scs_float));
    /* lwork = liwork = -1 is the LAPACK workspace-size query */
    BLAS(syevr)("Vectors", "All", "Lower", &nMax, c->Xs, &nMax, SCS_NULL,
                SCS_NULL, SCS_NULL, SCS_NULL, &eigTol, &m, c->e, c->Z, &nMax,
                SCS_NULL, &wkopt, &negOne, &(c->liwork), &negOne, &info);
    if (info != 0) {
        scs_printf("FATAL: syevr failure, info = %li\n", (long) info);
        return -1;
    }
    c->lwork = (blasint) (wkopt + 0.01); /* 0.01 for int casting safety */
    c->work = scs_malloc(c->lwork * sizeof (scs_float));
    c->iwork = scs_malloc(c->liwork * sizeof (blasint));

    if (c->Xs == SCS_NULL || c->Z == SCS_NULL || c->e == SCS_NULL ||
        c->work == SCS_NULL || c->iwork == SCS_NULL) {
        return -1;
    }
    return 0;
#else
    scs_printf("FATAL: Cannot solve SDPs with > 2x2 matrices without linked "
               "blas+lapack libraries\n");
    scs_printf("Install blas+lapack and re-compile SCS with blas+lapack libray "
               "locations\n");
    return -1;
#endif
}

/* allocates the cone workspace; LAPACK buffers are only set up when some SD
 * cone is larger than 2x2.  Returns SCS_NULL on failure. */
ScsConeWork *scs_init_conework(const ScsCone * RESTRICT k) {
    ScsConeWork * RESTRICT coneWork = scs_calloc(1, sizeof (ScsConeWork));
    coneWork->total_cone_time = 0.0;
    if (k->ssize && k->s) {
        if (isSimpleSemiDefiniteCone(k->s, k->ssize) == 0 &&
            setUpSdScsConeWorkSpace(coneWork, k) < 0) {
            scs_finish_cone(coneWork);
            return SCS_NULL;
        }
    }
    return coneWork;
}

/* closed-form PSD projection of a packed 2x2 symmetric matrix
 * X = [a, b/sqrt(2); b/sqrt(2), d] stored as {a, b, d}
 * (off-diagonal packed with the sqrt(2) scaling used throughout SCS) */
scs_int project2By2Sdc(scs_float *X) {
    scs_float a, b, d, l1, l2, x1, x2, rad;
    scs_float sqrt2 = SQRTF(2.0);
    a = X[0];
    b = X[1] / sqrt2;
    d = X[2];

    if (ABS(b) < 1e-6) { /* diagonal matrix */
        X[0] = MAX(a, 0);
        X[1] = 0;
        X[2] = MAX(d, 0);
        return 0;
    }

    rad = SQRTF((a - d) * (a - d) + 4 * b * b);
    /* l1 >= l2 always, since rad >= 0 */
    l1 = 0.5 * (a + d + rad);
    l2 = 0.5 * (a + d - rad);

    if (l2 >= 0) { /* both eigs positive already */
        return 0;
    }
    if (l1 <= 0) { /* both eigs negative, set to 0 */
        X[0] = 0;
        X[1] = 0;
        X[2] = 0;
        return 0;
    }

    /* l1 pos, l2 neg */
    x1 = 1 / SQRTF(1 + (l1 - a) * (l1 - a) / b / b);
    x2 = x1 * (l1 - a) / b;

    X[0] = l1 * x1 * x1;
    X[1] = (l1 * x1 * x2) * sqrt2;
    X[2] = l1 * x2 * x2;
    return 0;
}

/* size of X is getSdConeSize(n) */
static scs_int projSemiDefiniteCone(scs_float * RESTRICT X, const scs_int n,
                                    ScsConeWork * RESTRICT c,
                                    const scs_int iter) {
    /* project onto the positive semi-definite cone */
#ifdef LAPACK_LIB_FOUND
    scs_int i;
    blasint one = 1;
    blasint m = 0;
    blasint nb = (blasint) n;
    blasint nbPlusOne = (blasint) (n + 1);
    blasint coneSz = (blasint) (getSdConeSize(n));
    scs_float sqrt2 = SQRTF(2.0);
    scs_float sqrt2Inv = 1.0 / sqrt2;
    scs_float *RESTRICT Xs = c->Xs;
    scs_float *RESTRICT Z = c->Z;
    scs_float *RESTRICT e = c->e;
    scs_float *RESTRICT work = c->work;
    blasint *RESTRICT iwork = c->iwork;
    blasint lwork = c->lwork;
    blasint liwork = c->liwork;
    scs_float eigTol = CONE_TOL;
    /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 / POWF(iter + 1, CONE_RATE)); */
    scs_float zero = 0.0;
    blasint info;
    scs_float vupper;
#endif /* LAPACK_LIB_FOUND */
    if (n == 0) {
        return 0;
    }
    if (n == 1) {
        if (X[0] < 0.0) {
            X[0] = 0.0;
        }
        return 0;
    }
    if (n == 2) {
        /* closed form, no LAPACK needed */
        return project2By2Sdc(X);
    }
#ifdef LAPACK_LIB_FOUND
    /* expand lower triangular matrix to full matrix */
    for (i = 0; i < n; ++i) {
        memcpy(&(Xs[i * (n + 1)]), &(X[i * n - ((i - 1) * i) / 2]),
               (n - i) * sizeof (scs_float));
    }
    /*
       rescale so projection works, and matrix norm preserved
       see http://www.seas.ucla.edu/~vandenbe/publications/mlbook.pdf pg 3
     */
    /* scale diags by sqrt(2) */
    BLAS(scal)(&nb, &sqrt2, Xs, &nbPlusOne); /* not nSquared */

    /* max-eig upper bounded by frobenius norm */
    vupper = 1.1 * sqrt2 *
             BLAS(nrm2)(&coneSz, X, &one); /* mult by factor to make sure is
                                              upper bound */
    vupper = MAX(vupper, 0.01);
    /* Solve eigenproblem, reuse workspaces
       (only eigenvalues in (0, vupper] are computed — the positive part) */
    BLAS(syevr)("Vectors", "VInterval", "Lower", &nb, Xs, &nb, &zero, &vupper,
                SCS_NULL, SCS_NULL, &eigTol, &m, e, Z, &nb, SCS_NULL, work,
                &lwork, iwork, &liwork, &info);
    if (info < 0)
        return -1;

    /* rebuild X from the m positive eigenpairs via rank-1 updates */
    memset(Xs, 0, n * n * sizeof (scs_float));
    for (i = 0; i < m; ++i) {
        scs_float a = e[i];
        BLAS(syr)("Lower", &nb, &a, &(Z[i * n]), &one, Xs, &nb);
    }
    /* scale diags by 1/sqrt(2) */
    BLAS(scal)(&nb, &sqrt2Inv, Xs, &nbPlusOne); /* not nSquared */
    /* extract just lower triangular matrix */
    for (i = 0; i < n; ++i) {
        memcpy(&(X[i * n - ((i - 1) * i) / 2]), &(Xs[i * (n + 1)]),
               (n - i) * sizeof (scs_float));
    }
#else /* LAPACK_LIB_FOUND */
    scs_printf("FAILURE: solving SDP with > 2x2 matrices, but no blas/lapack "
               "libraries were linked!\n");
    scs_printf("SCS will return nonsense!\n");
    scs_scale_array(X, NAN, n);
    return -1;
#endif /* LAPACK_LIB_FOUND */
    return 0;
}

/* power-cone projection helpers (fixed-point iteration on r) */
static scs_float powCalcX(scs_float r, scs_float xh, scs_float rh,
                          scs_float a) {
    scs_float x = 0.5 * (xh + SQRTF(xh * xh + 4 * a * (rh - r) * r));
    return MAX(x, 1e-12);
}

static scs_float powCalcdxdr(scs_float x, scs_float xh, scs_float rh,
                             scs_float r, scs_float a) {
    return a * (rh - 2 * r) / (2 * x - xh);
}

static scs_float powCalcF(scs_float x, scs_float y, scs_float r, scs_float a) {
    return POWF(x, a) * POWF(y, (1 - a)) - r;
}

static scs_float powCalcFp(scs_float x, scs_float y, scs_float dxdr,
                           scs_float dydr, scs_float a) {
    return POWF(x, a) * POWF(y, (1 - a)) * (a * dxdr / x + (1 - a) * dydr / y) -
           1;
}

/* projects v (length 3) onto the primal power cone
 * K_a = { (x, y, r) : x, y >= 0, x^a y^(1-a) >= |r| }, via a damped Newton
 * iteration when neither membership shortcut applies */
static void projPowerCone(scs_float *RESTRICT v, scs_float a) {
    scs_float xh = v[0], yh = v[1], rh = ABS(v[2]);
    scs_float x, y, r;
    scs_int i;
    /* v in K_a */
    if (xh >= 0 && yh >= 0 &&
        CONE_THRESH + POWF(xh, a) * POWF(yh, (1 - a)) >= rh)
        return;

    /* -v in K_a^* */
    if (xh <= 0 && yh <= 0 &&
        CONE_THRESH + POWF(-xh, a) * POWF(-yh, 1 - a) >=
            rh * POWF(a, a) * POWF(1 - a, 1 - a)) {
        v[0] = v[1] = v[2] = 0;
        return;
    }

    r = rh / 2;
    for (i = 0; i < POW_CONE_MAX_ITERS; ++i) {
        scs_float f, fp, dxdr, dydr;
        x = powCalcX(r, xh, rh, a);
        y = powCalcX(r, yh, rh, 1 - a);

        f = powCalcF(x, y, r, a);
        if (ABS(f) < CONE_TOL) {
            break;
        }

        dxdr = powCalcdxdr(x, xh, rh, r, a);
        dydr = powCalcdxdr(y, yh, rh, r, (1 - a));
        fp = powCalcFp(x, y, dxdr, dydr, a);

        /* Newton step, clamped to the feasible interval [0, rh] */
        r = MAX(r - f / fp, 0);
        r = MIN(r, rh);
    }
    v[0] = x;
    v[1] = y;
    /* restore the sign of the r component */
    v[2] = (v[2] < 0) ? -(r) : (r);
}

/* outward facing cone projection routine, iter is outer algorithm iteration, if
   iter < 0 then iter is ignored
   warm_start contains guess of projection (can be set to SCS_NULL)
   Projects x in place onto the dual cone of k, walking the cone blocks in
   the order: free, linear, SOC, SD, primal exp, dual exp, power. */
scs_int scs_project_dual_cone(scs_float * RESTRICT x,
                              const ScsCone * RESTRICT k,
                              ScsConeWork * RESTRICT c,
                              const scs_float * RESTRICT warm_start,
                              scs_int iter) {
    scs_int i;
    scs_int count = (k->f ? k->f : 0); /* free cone: skip, no projection */
    ScsTimer coneTimer;
    scs_tic(&coneTimer);

    if (k->l) {
        /* project onto positive orthant */
        for (i = count; i < count + k->l; ++i) {
            if (x[i] < 0.0)
                x[i] = 0.0;
            /* x[i] = (x[i] < 0.0) ? 0.0 : x[i]; */
        }
        count += k->l;
    }

    if (k->qsize && k->q) {
        /* project onto SOC */
        for (i = 0; i < k->qsize; ++i) {
            if (k->q[i] == 0) {
                continue;
            }
            if (k->q[i] == 1) {
                if (x[count] < 0.0)
                    x[count] = 0.0;
            } else {
                scs_float v1 = x[count];
                scs_float s = scs_norm(&(x[count + 1]), k->q[i] - 1);
                scs_float alpha = (s + v1) / 2.0;

                if (s <= v1) { /* do nothing */
                } else if (s <= -v1) {
                    memset(&(x[count]), 0, k->q[i] * sizeof (scs_float));
                } else {
                    x[count] = alpha;
                    scs_scale_array(&(x[count + 1]), alpha / s, k->q[i] - 1);
                }
            }
            count += k->q[i];
        }
    }

    if (k->ssize && k->s) {
        /* project onto PSD cone */
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] == 0) {
                continue;
            }
            if (projSemiDefiniteCone(&(x[count]), k->s[i], c, iter) < 0)
                return -1;
            count += getSdConeSize(k->s[i]);
        }
    }

    if (k->ep) {
        scs_float r, s, t;
        scs_int idx;
        /*
         * exponential cone is not self dual, if s \in K
         * then y \in K^* and so if K is the primal cone
         * here we project onto K^*, via Moreau
         * \Pi_C^*(y) = y + \Pi_C(-y)
         */
        scs_scale_array(&(x[count]), -1, 3 * k->ep); /* x = -x; */
#ifdef _OPENMP
#pragma omp parallel for private(r, s, t, idx)
#endif
        for (i = 0; i < k->ep; ++i) {
            idx = count + 3 * i;
            r = x[idx];
            s = x[idx + 1];
            t = x[idx + 2];

            projExpCone(&(x[idx]), iter);

            x[idx] -= r;
            x[idx + 1] -= s;
            x[idx + 2] -= t;
        }
        count += 3 * k->ep;
    }

    if (k->ed) {
        /* exponential cone: */
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < k->ed; ++i) {
            projExpCone(&(x[count + 3 * i]), iter);
        }
        count += 3 * k->ed;
    }

    if (k->psize && k->p) {
        scs_float v[3];
        scs_int idx;
        /* don't use openmp for power cone
        ifdef _OPENMP
        pragma omp parallel for private(v, idx)
        endif
        */
        for (i = 0; i < k->psize; ++i) { /* p_i negative -> dual cone */
            idx = count + 3 * i;
            if (k->p[i] <= 0) {
                /* dual power cone */
                projPowerCone(&(x[idx]), -k->p[i]);
            } else {
                /* primal power cone, using Moreau */
                v[0] = -x[idx];
                v[1] = -x[idx + 1];
                v[2] = -x[idx + 2];

                projPowerCone(v, k->p[i]);

                x[idx] += v[0];
                x[idx + 1] += v[1];
                x[idx + 2] += v[2];
            }
        }
        /* count += 3 * k->psize; */
    }
    /* project onto OTHER cones */

    if (c) {
        c->total_cone_time += scs_toc_quiet(&coneTimer);
    }
    return 0;
}
prog.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <omp.h> #include "timer.h" int randInt(int min, int max) { return (rand() % (max - min + 1)) + min; } int * randIntArray(unsigned int length, int min, int max) { int *array; unsigned int i; array = malloc(sizeof(*array) * length); for (i = 0; i < length; ++i) { array[i] = randInt(min, max); } return array; } void printIntArray(int *array, unsigned int length, FILE* stream) { unsigned int i; for (i = 0; i < length; ++i) { fprintf(stream, "%02d ", array[i]); } } void printlnIntArray(int *array, unsigned int length, FILE* stream) { printIntArray(array, length, stream); fprintf(stream, "\n"); } int main(int argc, char const *argv[]) { int numThreads; unsigned int length; int minRand; int maxRand; double mean; int moduloProduct; int minimumValue; int maximumValue; int *values; double inicioTotal, fimTotal, tempoTotal; double inicio[4], fim[4], tempo[4]; unsigned int i; if (argc < 3) { printf("Error missing command line argument.\n"); return 1; } length = atoi(argv[1]); minRand = atoi(argv[2]); maxRand = atoi(argv[3]); numThreads = atoi(argv[4]); values = randIntArray(length, minRand, maxRand); GET_TIME(inicioTotal); #pragma omp parallel num_threads(numThreads) private(i) { #pragma omp sections nowait { #pragma omp section { GET_TIME(inicio[0]); mean = 0; for (i = 0; i < length; ++i) { mean += values[i]; } mean /= (double)length; GET_TIME(fim[0]); tempo[0] = fim[0] - inicio[0]; } #pragma omp section { GET_TIME(inicio[1]); minimumValue = maxRand; for (i = 0; i < length; ++i) { if (values[i] < minimumValue) { minimumValue = values[i]; } } GET_TIME(fim[1]); tempo[1] = fim[1] - inicio[1]; } #pragma omp section { GET_TIME(inicio[2]); maximumValue = minRand; for (i = 0; i < length; ++i) { if (values[i] > maximumValue) { maximumValue = values[i]; } } GET_TIME(fim[2]); tempo[2] = fim[2] - inicio[2]; } #pragma omp section { GET_TIME(inicio[3]); moduloProduct = 1; for (i = 0; i < length; ++i) { 
moduloProduct = (moduloProduct * ((int)pow(values[i], 2.0))) % 10000007; } GET_TIME(fim[3]); tempo[3] = fim[3] - inicio[3]; } } } GET_TIME(fimTotal); tempoTotal = fimTotal - inicioTotal; // for (i = 0; i < 4; ++i) { // printf("Tempo tarefa %d: %.8lf\n", i, tempo[i]); // } printf("Tempo Total: %.8lf\n", tempoTotal); // printf("Mean: %02.2lf\n", mean); // printf("Min: %d\n", minimumValue); // printf("Max: %d\n", maximumValue); // printf("Modulo Product: %d\n", moduloProduct); free(values); return 0; }
1556.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute parallel for num_threads(8) dist_schedule(static, 16) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
ompUtils.h
#ifndef OMP_UTILS_H #define OMP_UTILS_H #include "DataflowCFG.h" #include "CFGRewrite.h" #include <string> #include <list> #include <iostream> #include <sstream> #include <boost/regex.hpp> class ompUtils { public: static const int unknown_type = -1; static const int omp_parallel = 0; static const int omp_for = 1; static const int omp_parallel_for = 2; static const int omp_sections = 3; static const int omp_section = 4; static const int omp_single = 5; static const int omp_master = 6; static const int omp_critical = 7; static const int omp_barrier = 8; static const int omp_atomic = 9; static const int omp_flush = 10; static const int omp_threadprivate = 11; static bool match_OMP_String(std::string directive, std::string targetStr); static int getOmpType(std::string directive); static std::string ompTypeStr(int ompType); }; /******************************* ***** R E D U C T I O N S ***** *******************************/ class reductionOperation { public: static const int unknown_type = -1; static const int reduction_plus = 0; static const int reduction_multiply = 1; static const int reduction_minus = 2; static const int reduction_logic_AND = 3; static const int reduction_logic_XOR = 4; static const int reduction_logic_OR = 5; static const int reduction_boolean_AND = 6; static const int reduction_boolean_OR = 7; protected: int op; public: reductionOperation(); reductionOperation(const reductionOperation &that); reductionOperation(std::string opStr); void init(std::string opStr); std::string str(); }; class ompReduction { reductionOperation operation; list<std::string> variables; public: ompReduction(const ompReduction& that); // numRedVars - the number of reduction variables in this match (needed to get around a boost regex bug) ompReduction(boost::smatch what, int numRedVars); std::string str(); int numVars(); reductionOperation& getOp(); }; class ompReductionsSet { list<ompReduction> reductions; public: ompReductionsSet(); ompReductionsSet(const ompReductionsSet& 
that); ompReductionsSet(std::string directive); int numReductions(); std::string str(); }; /*********************************** ***** D A T A C L A U S E S ***** ***********************************/ class ompDataClause { std::string clauseType; list<std::string> variables; public: ompDataClause(); ompDataClause(const ompDataClause &that); ompDataClause(std::string directive, std::string clauseType); int numVars() const; const list<std::string>& getVars() const; void addVar(std::string var); std::string str() const; }; class defaultVarSharing { public: static const int var_sharing_class_unknown = 0; static const int var_sharing_class_none = 1; static const int var_sharing_class_shared = 2; protected: int type; public: defaultVarSharing(const defaultVarSharing& that); defaultVarSharing(std::string directive); std::string str(); }; /******************************** ***** #pragma omp parallel ***** ********************************/ class ompParallelFor; class ompParallel { public: ompReductionsSet reductions; ompDataClause privateVars; ompDataClause firstPrivateVars; ompDataClause lastPrivateVars; ompDataClause sharedVars; ompDataClause copyinVars; defaultVarSharing varSharingClass; ompParallel(std::string directive, bool verbose=false); ompParallel(ompParallelFor opf); ompParallel(ompReductionsSet oldReductions, ompDataClause oldPrivateVars, ompDataClause oldFirstPrivateVars, ompDataClause oldLastPrivateVars, ompDataClause oldSharedVars, ompDataClause oldCopyinVars, defaultVarSharing oldVarSharingClass); std::string str(); std::string str_nopragma(); }; /*************************** ***** #pragma omp for ***** ***************************/ class ompFor { bool ordered; bool nowait; public: ompReductionsSet reductions; ompDataClause privateVars; ompDataClause firstPrivateVars; ompDataClause lastPrivateVars; ompFor(std::string directive, bool verbose = false); ompFor(ompReductionsSet oldReductions, ompDataClause oldPrivateVars, ompDataClause oldFirstPrivateVars, 
ompDataClause oldLastPrivateVars, bool oldOrdered, bool oldNoWait); ompFor(bool oldOrdered, bool oldNoWait); bool getOrdered(); bool getNowait(); std::string str(); std::string str_nopragma(); }; /************************************ ***** #pragma omp parallel for ***** ************************************/ class ompParallelFor { bool ordered; public: ompReductionsSet reductions; ompDataClause privateVars; ompDataClause firstPrivateVars; ompDataClause lastPrivateVars; ompDataClause sharedVars; ompDataClause copyinVars; defaultVarSharing varSharingClass; ompParallelFor(std::string directive, bool verbose = false); ompParallelFor(ompReductionsSet oldReductions, ompDataClause oldPrivateVars, ompDataClause oldFirstPrivateVars, ompDataClause oldLastPrivateVars, ompDataClause oldSharedVars, ompDataClause oldCopyinVars, bool oldOrdered, defaultVarSharing oldVarSharingClass); bool getOrdered(); std::string str(); std::string str_nopragma(); friend class ompParallel; }; /****************************** ***** #pragma omp single ***** ******************************/ class ompSingle { bool nowait; public: ompDataClause privateVars; ompDataClause firstPrivateVars; ompDataClause copyPrivateVars; ompSingle(std::string directive, bool verbose = false); ompSingle(ompDataClause oldPrivateVars, ompDataClause oldFirstPrivateVars, ompDataClause oldCopyPrivateVars, bool oldNoWait); ompSingle(bool oldNoWait); bool getNowait(); std::string str(); std::string str_nopragma(); }; /************************************* ***** #pragma omp threadprivate ***** *************************************/ class ompThreadprivate { public: ompDataClause vars; ompThreadprivate(std::string directive, bool verbose=false); ompThreadprivate(ompThreadprivate& otp); ompThreadprivate(ompDataClause vars); std::string str(); std::string str_nopragma(); }; /************************************* ***** OpenMP-specific insertion ***** *************************************/ // returns the statement that follows the given 
pragma SgStatement* getPragmaBody(SgPragmaDeclaration* pragma); // calls annotateOmpFor() on all #pragma omp for SgForStatements in the given AST tree void annotateAllOmpFors(SgNode* root); // annotates the headers #pragma omp for loops to indicate that no insertion should take place // in the test and update sub-trees and that all insertions into the inialization sub-tree should // actually be placed right before the #pragma omp for void annotateOmpFor(SgPragmaDeclaration* pragma, SgForStatement* ompFor); class OMPcfgRWTransaction : public cfgRWTransaction { public: void insertBefore(DataflowNode cfgNode, SgExpression* newNode); void insertBefore(SgNode* n, SgExpression* newNode); void insertAfter(DataflowNode cfgNode, SgExpression* newNode); void insertAfter(SgNode* n, SgExpression* newNode); // insert an SgNode along the given CFGEdge void insertAlong(DataflowEdge e, SgExpression* newNode); }; // surrounds the body of the given pragma with a SgBasicBlock, // if oldBody!=NULL sets it to this pragma's original body // if newBody!=NULL setts it to this pragma's new body void wrapPragmaBody(SgPragmaDeclaration* pragma, SgStatement** oldBody=NULL, SgBasicBlock** newBody=NULL); // surrounds the given pragma and its body with a SgBasicBlock, // returns this new SgBasicBlock SgBasicBlock* wrapPragmaAndBody(SgPragmaDeclaration* pragma); // inserts the given statement to either the top or bottom of the given OpenMP directive // supports any directive that may have a basic block as its body void insertTopBottomOmpDirective(SgPragmaDeclaration* pragma, bool top, SgStatement* stmt); // inserts statements generated by stmtCreate at either the top or the bottom of the body of all OpenMP directives of the given // type in the given AST subtree // supports any directive that may have a basic block as its body void insertTopBottomOmpDirectives(SgNode* root, int ompType, bool top, SgStatement* (*stmtCreate)()); #endif
rng.c
#include <stdio.h>
#include <omp.h>

/* Deliberately data-racy "RNG": many threads bump one shared counter with
   no synchronization, so the increments collide and the printed remainder
   is nondeterministic.  The race is the whole point of this demo. */
int main()
{
	int iterations = 10000;
	int modulus = 10;
	int idx;

	/* Shared accumulator, intentionally incremented without atomics. */
	long shared_counter = 0;

#pragma omp parallel for schedule(guided)
	for (idx = 0; idx < iterations; idx++)
		shared_counter++;

	printf("%ld\n", shared_counter % modulus);
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unaryop__ainv_fp64_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_fp64_uint64
// op(A') function: GB_tran__ainv_fp64_uint64

// C type: double
// A type: uint64_t
// cast: double cij = (double) aij
// unaryop: cij = -aij
// (the cast to double happens first, so the negation is done in floating
// point, not as a wrapping unsigned negation)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_fp64_uint64
(
    double *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // one independent entry per iteration: embarrassingly parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_fp64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, driven by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
decl.c
/* Process declarations and variables for -*- C++ -*- compiler. Copyright (C) 1988-2020 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Process declarations and symbol lookup for C++ front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "target.h" #include "c-family/c-target.h" #include "cp-tree.h" #include "timevar.h" #include "stringpool.h" #include "cgraph.h" #include "stor-layout.h" #include "varasm.h" #include "attribs.h" #include "flags.h" #include "tree-iterator.h" #include "decl.h" #include "intl.h" #include "toplev.h" #include "c-family/c-objc.h" #include "c-family/c-pragma.h" #include "c-family/c-ubsan.h" #include "debug.h" #include "plugin.h" #include "builtins.h" #include "gimplify.h" #include "asan.h" #include "gcc-rich-location.h" #include "langhooks.h" #include "omp-general.h" /* Possible cases of bad specifiers type used by bad_specifiers. 
*/ enum bad_spec_place { BSP_VAR, /* variable */ BSP_PARM, /* parameter */ BSP_TYPE, /* type */ BSP_FIELD /* field */ }; static const char *redeclaration_error_message (tree, tree); static int decl_jump_unsafe (tree); static void require_complete_types_for_parms (tree); static tree grok_reference_init (tree, tree, tree, int); static tree grokvardecl (tree, tree, tree, const cp_decl_specifier_seq *, int, int, int, bool, int, tree, location_t); static void check_static_variable_definition (tree, tree); static void record_unknown_type (tree, const char *); static int member_function_or_else (tree, tree, enum overload_flags); static tree local_variable_p_walkfn (tree *, int *, void *); static const char *tag_name (enum tag_types); static tree lookup_and_check_tag (enum tag_types, tree, tag_scope, bool); static void maybe_deduce_size_from_array_init (tree, tree); static void layout_var_decl (tree); static tree check_initializer (tree, tree, int, vec<tree, va_gc> **); static void make_rtl_for_nonlocal_decl (tree, tree, const char *); static void copy_type_enum (tree , tree); static void check_function_type (tree, tree); static void finish_constructor_body (void); static void begin_destructor_body (void); static void finish_destructor_body (void); static void record_key_method_defined (tree); static tree create_array_type_for_decl (tree, tree, tree, location_t); static tree get_atexit_node (void); static tree get_dso_handle_node (void); static tree start_cleanup_fn (void); static void end_cleanup_fn (void); static tree cp_make_fname_decl (location_t, tree, int); static void initialize_predefined_identifiers (void); static tree check_special_function_return_type (special_function_kind, tree, tree, int, const location_t*); static tree push_cp_library_fn (enum tree_code, tree, int); static tree build_cp_library_fn (tree, enum tree_code, tree, int); static void store_parm_decls (tree); static void initialize_local_var (tree, tree); static void expand_static_init (tree, tree); 
static location_t smallest_type_location (const cp_decl_specifier_seq*); /* The following symbols are subsumed in the cp_global_trees array, and listed here individually for documentation purposes. C++ extensions tree wchar_decl_node; tree vtable_entry_type; tree delta_type_node; tree __t_desc_type_node; tree class_type_node; tree unknown_type_node; Array type `vtable_entry_type[]' tree vtbl_type_node; tree vtbl_ptr_type_node; Namespaces, tree std_node; tree abi_node; A FUNCTION_DECL which can call `abort'. Not necessarily the one that the user will declare, but sufficient to be called by routines that want to abort the program. tree abort_fndecl; Used by RTTI tree type_info_type_node, tinfo_decl_id, tinfo_decl_type; tree tinfo_var_id; */ tree cp_global_trees[CPTI_MAX]; /* A list of objects which have constructors or destructors which reside in the global scope. The decl is stored in the TREE_VALUE slot and the initializer is stored in the TREE_PURPOSE slot. */ tree static_aggregates; /* Like static_aggregates, but for thread_local variables. */ tree tls_aggregates; /* -- end of C++ */ /* A node for the integer constant 2. */ tree integer_two_node; /* vector of static decls. */ vec<tree, va_gc> *static_decls; /* vector of keyed classes. */ vec<tree, va_gc> *keyed_classes; /* Used only for jumps to as-yet undefined labels, since jumps to defined labels can have their validity checked immediately. */ struct GTY((chain_next ("%h.next"))) named_label_use_entry { struct named_label_use_entry *next; /* The binding level to which this entry is *currently* attached. This is initially the binding level in which the goto appeared, but is modified as scopes are closed. */ cp_binding_level *binding_level; /* The head of the names list that was current when the goto appeared, or the inner scope popped. These are the decls that will *not* be skipped when jumping to the label. */ tree names_in_scope; /* The location of the goto, for error reporting. 
*/ location_t o_goto_locus; /* True if an OpenMP structured block scope has been closed since the goto appeared. This means that the branch from the label will illegally exit an OpenMP scope. */ bool in_omp_scope; }; /* A list of all LABEL_DECLs in the function that have names. Here so we can clear out their names' definitions at the end of the function, and so we can check the validity of jumps to these labels. */ struct GTY((for_user)) named_label_entry { tree name; /* Name of decl. */ tree label_decl; /* LABEL_DECL, unless deleted local label. */ named_label_entry *outer; /* Outer shadowed chain. */ /* The binding level to which the label is *currently* attached. This is initially set to the binding level in which the label is defined, but is modified as scopes are closed. */ cp_binding_level *binding_level; /* The head of the names list that was current when the label was defined, or the inner scope popped. These are the decls that will be skipped when jumping to the label. */ tree names_in_scope; /* A vector of all decls from all binding levels that would be crossed by a backward branch to the label. */ vec<tree, va_gc> *bad_decls; /* A list of uses of the label, before the label is defined. */ named_label_use_entry *uses; /* The following bits are set after the label is defined, and are updated as scopes are popped. They indicate that a jump to the label will illegally enter a scope of the given flavor. */ bool in_try_scope; bool in_catch_scope; bool in_omp_scope; bool in_transaction_scope; bool in_constexpr_if; }; #define named_labels cp_function_chain->x_named_labels /* The number of function bodies which we are currently processing. (Zero if we are at namespace scope, one inside the body of a function, two inside the body of a function in a local class, etc.) */ int function_depth; /* Whether the exception-specifier is part of a function type (i.e. C++17). 
*/
bool flag_noexcept_type;

/* States indicating how grokdeclarator() should handle declspecs marked
   with __attribute__((deprecated)).  An object declared as
   __attribute__((deprecated)) suppresses warnings of uses of other
   deprecated items.  */
enum deprecated_states deprecated_state = DEPRECATED_NORMAL;

/* A list of VAR_DECLs whose type was incomplete at the time the
   variable was declared.  */

struct GTY(()) incomplete_var {
  tree decl;
  tree incomplete_type;
};


static GTY(()) vec<incomplete_var, va_gc> *incomplete_vars;

/* Returns the kind of template specialization we are currently
   processing, given that it's declaration contained N_CLASS_SCOPES
   explicit scope qualifications.  */

tmpl_spec_kind
current_tmpl_spec_kind (int n_class_scopes)
{
  int n_template_parm_scopes = 0;
  int seen_specialization_p = 0;
  int innermost_specialization_p = 0;
  cp_binding_level *b;

  /* Scan through the template parameter scopes.  */
  for (b = current_binding_level;
       b->kind == sk_template_parms;
       b = b->level_chain)
    {
      /* If we see a specialization scope inside a parameter scope,
	 then something is wrong.  That corresponds to a declaration
	 like:

	    template <class T> template <> ...

	 which is always invalid since [temp.expl.spec] forbids the
	 specialization of a class member template if the enclosing
	 class templates are not explicitly specialized as well.  */
      if (b->explicit_spec_p)
	{
	  if (n_template_parm_scopes == 0)
	    innermost_specialization_p = 1;
	  else
	    seen_specialization_p = 1;
	}
      else if (seen_specialization_p == 1)
	return tsk_invalid_member_spec;

      ++n_template_parm_scopes;
    }

  /* Handle explicit instantiations.  */
  if (processing_explicit_instantiation)
    {
      if (n_template_parm_scopes != 0)
	/* We've seen a template parameter list during an explicit
	   instantiation.  For example:

	     template <class T> template void f(int);

	   This is erroneous.  */
	return tsk_invalid_expl_inst;
      else
	return tsk_expl_inst;
    }

  if (n_template_parm_scopes < n_class_scopes)
    /* We've not seen enough template headers to match all the
       specialized classes present.  For example:

	 template <class T> void R<T>::S<T>::f(int);

       This is invalid; there needs to be one set of template
       parameters for each class.  */
    return tsk_insufficient_parms;
  else if (n_template_parm_scopes == n_class_scopes)
    /* We're processing a non-template declaration (even though it may
       be a member of a template class.)  For example:

	 template <class T> void S<T>::f(int);

       The `class T' matches the `S<T>', leaving no template headers
       corresponding to the `f'.  */
    return tsk_none;
  else if (n_template_parm_scopes > n_class_scopes + 1)
    /* We've got too many template headers.  For example:

	 template <> template <class T> void f (T);

       There need to be more enclosing classes.  */
    return tsk_excessive_parms;
  else
    /* This must be a template.  It's of the form:

	 template <class T> template <class U> void S<T>::f(U);

       This is a specialization if the innermost level was a
       specialization; otherwise it's just a definition of the
       template.  */
    return innermost_specialization_p ? tsk_expl_spec : tsk_template;
}

/* Exit the current scope.  */

void
finish_scope (void)
{
  poplevel (0, 0, 0);
}

/* When a label goes out of scope, check to see if that label was used
   in a valid manner, and issue any appropriate warnings or errors.  */

static void
check_label_used (tree label)
{
  if (!processing_template_decl)
    {
      if (DECL_INITIAL (label) == NULL_TREE)
	{
	  location_t location;

	  error ("label %q+D used but not defined", label);
	  location = input_location;
	  /* FIXME want (LOCATION_FILE (input_location), (line)0) */
	  /* Avoid crashing later.  */
	  define_label (location, DECL_NAME (label));
	}
      else
	warn_for_unused_label (label);
    }
}

/* Helper function to sort named label entries in a vector by DECL_UID.
   NOTE(review): never returns 0 -- DECL_UIDs are unique -- so this is a
   strict total order, placing higher UIDs (newer labels) first.  */

static int
sort_labels (const void *a, const void *b)
{
  tree label1 = *(tree const *) a;
  tree label2 = *(tree const *) b;

  /* DECL_UIDs can never be equal.  */
  return DECL_UID (label1) > DECL_UID (label2) ? -1 : +1;
}

/* At the end of a function, all labels declared within the function
   go out of scope.  BLOCK is the top-level block for the function.  */

static void
pop_labels (tree block)
{
  if (!named_labels)
    return;

  /* We need to add the labels to the block chain, so debug
     information is emitted.  But, we want the order to be stable so
     need to sort them first.  Otherwise the debug output could be
     randomly ordered.  I guess it's mostly stable, unless the hash
     table implementation changes.  */
  auto_vec<tree, 32> labels (named_labels->elements ());
  hash_table<named_label_hash>::iterator end (named_labels->end ());
  for (hash_table<named_label_hash>::iterator iter
	 (named_labels->begin ()); iter != end; ++iter)
    {
      named_label_entry *ent = *iter;

      gcc_checking_assert (!ent->outer);
      if (ent->label_decl)
	labels.quick_push (ent->label_decl);
      ggc_free (ent);
    }
  named_labels = NULL;
  labels.qsort (sort_labels);

  while (labels.length ())
    {
      tree label = labels.pop ();

      DECL_CHAIN (label) = BLOCK_VARS (block);
      BLOCK_VARS (block) = label;

      check_label_used (label);
    }
}

/* At the end of a block with local labels, restore the outer definition.  */

static void
pop_local_label (tree id, tree label)
{
  check_label_used (label);
  named_label_entry **slot = named_labels->find_slot_with_hash
    (id, IDENTIFIER_HASH_VALUE (id), NO_INSERT);
  named_label_entry *ent = *slot;

  /* Either restore the shadowed outer entry, or replace this one with a
     fresh (empty) entry so the name no longer resolves to a label.  */
  if (ent->outer)
    ent = ent->outer;
  else
    {
      ent = ggc_cleared_alloc<named_label_entry> ();
      ent->name = id;
    }
  *slot = ent;
}

/* The following two routines are used to interface to Objective-C++.
   The binding level is purposely treated as an opaque type.  */

void *
objc_get_current_scope (void)
{
  return current_binding_level;
}

/* The following routine is used by the NeXT-style SJLJ exceptions;
   variables get marked 'volatile' so as to not be clobbered by
   _setjmp()/_longjmp() calls.  All variables in the current scope,
   as well as parent scopes up to (but not including) ENCLOSING_BLK shall
   be thusly marked.  */

void
objc_mark_locals_volatile (void *enclosing_blk)
{
  cp_binding_level *scope;

  for (scope = current_binding_level;
       scope && scope != enclosing_blk;
       scope = scope->level_chain)
    {
      tree decl;

      for (decl = scope->names; decl; decl = TREE_CHAIN (decl))
	objc_volatilize_decl (decl);

      /* Do not climb up past the current function.  */
      if (scope->kind == sk_function_parms)
	break;
    }
}

/* True if B is the level for the condition of a constexpr if.  */

static bool
level_for_constexpr_if (cp_binding_level *b)
{
  return (b->kind == sk_cond && b->this_entity
	  && TREE_CODE (b->this_entity) == IF_STMT
	  && IF_STMT_CONSTEXPR_P (b->this_entity));
}

/* Update data for defined and undefined labels when leaving a scope.  */

int
poplevel_named_label_1 (named_label_entry **slot, cp_binding_level *bl)
{
  named_label_entry *ent = *slot;
  cp_binding_level *obl = bl->level_chain;

  /* Label defined in the scope being popped: record decls a branch to
     it would skip, then re-attach the label one level further out.  */
  if (ent->binding_level == bl)
    {
      tree decl;

      /* ENT->NAMES_IN_SCOPE may contain a mixture of DECLs and
	 TREE_LISTs representing OVERLOADs, so be careful.  */
      for (decl = ent->names_in_scope; decl;
	   decl = (DECL_P (decl)
		   ? DECL_CHAIN (decl) : TREE_CHAIN (decl)))
	if (decl_jump_unsafe (decl))
	  vec_safe_push (ent->bad_decls, decl);

      ent->binding_level = obl;
      ent->names_in_scope = obl->names;
      switch (bl->kind)
	{
	case sk_try:
	  ent->in_try_scope = true;
	  break;
	case sk_catch:
	  ent->in_catch_scope = true;
	  break;
	case sk_omp:
	  ent->in_omp_scope = true;
	  break;
	case sk_transaction:
	  ent->in_transaction_scope = true;
	  break;
	case sk_block:
	  if (level_for_constexpr_if (bl->level_chain))
	    ent->in_constexpr_if = true;
	  break;
	default:
	  break;
	}
    }
  else if (ent->uses)
    {
      struct named_label_use_entry *use;

      for (use = ent->uses; use ; use = use->next)
	if (use->binding_level == bl)
	  {
	    use->binding_level = obl;
	    use->names_in_scope = obl->names;
	    if (bl->kind == sk_omp)
	      use->in_omp_scope = true;
	  }
    }

  return 1;
}

/* Saved errorcount to avoid -Wunused-but-set-{parameter,variable} warnings
   when errors were reported, except for -Werror-unused-but-set-*.  */
static int unused_but_set_errorcount;

/* Exit a binding level.
   Pop the level off, and restore the state of the
   identifier-decl mappings that were in effect when this level was entered.

   If KEEP == 1, this level had explicit declarations, so
   and create a "block" (a BLOCK node)
   for the level to record its declarations and subblocks
   for symbol table output.

   If FUNCTIONBODY is nonzero, this level is the body of a function,
   so create a block as if KEEP were set and also clear out all
   label names.

   If REVERSE is nonzero, reverse the order of decls before putting
   them into the BLOCK.  */

tree
poplevel (int keep, int reverse, int functionbody)
{
  tree link;
  /* The chain of decls was accumulated in reverse order.
     Put it into forward order, just for cleanliness.
*/ tree decls; tree subblocks; tree block; tree decl; scope_kind kind; bool subtime = timevar_cond_start (TV_NAME_LOOKUP); restart: block = NULL_TREE; gcc_assert (current_binding_level->kind != sk_class && current_binding_level->kind != sk_namespace); if (current_binding_level->kind == sk_cleanup) functionbody = 0; subblocks = functionbody >= 0 ? current_binding_level->blocks : 0; gcc_assert (!vec_safe_length (current_binding_level->class_shadowed)); /* We used to use KEEP == 2 to indicate that the new block should go at the beginning of the list of blocks at this binding level, rather than the end. This hack is no longer used. */ gcc_assert (keep == 0 || keep == 1); if (current_binding_level->keep) keep = 1; /* Any uses of undefined labels, and any defined labels, now operate under constraints of next binding contour. */ if (cfun && !functionbody && named_labels) named_labels->traverse<cp_binding_level *, poplevel_named_label_1> (current_binding_level); /* Get the decls in the order they were written. Usually current_binding_level->names is in reverse order. But parameter decls were previously put in forward order. */ decls = current_binding_level->names; if (reverse) { decls = nreverse (decls); current_binding_level->names = decls; } /* If there were any declarations or structure tags in that level, or if this level is a function body, create a BLOCK to record them for the life of this function. */ block = NULL_TREE; /* Avoid function body block if possible. */ if (functionbody && subblocks && BLOCK_CHAIN (subblocks) == NULL_TREE) keep = 0; else if (keep == 1 || functionbody) block = make_node (BLOCK); if (block != NULL_TREE) { BLOCK_VARS (block) = decls; BLOCK_SUBBLOCKS (block) = subblocks; } /* In each subblock, record that this is its superior. */ if (keep >= 0) for (link = subblocks; link; link = BLOCK_CHAIN (link)) BLOCK_SUPERCONTEXT (link) = block; /* Before we remove the declarations first check for unused variables. 
*/ if ((warn_unused_variable || warn_unused_but_set_variable) && current_binding_level->kind != sk_template_parms && !processing_template_decl) for (tree d = get_local_decls (); d; d = TREE_CHAIN (d)) { /* There are cases where D itself is a TREE_LIST. See in push_local_binding where the list of decls returned by getdecls is built. */ decl = TREE_CODE (d) == TREE_LIST ? TREE_VALUE (d) : d; tree type = TREE_TYPE (decl); if (VAR_P (decl) && (! TREE_USED (decl) || !DECL_READ_P (decl)) && ! DECL_IN_SYSTEM_HEADER (decl) /* For structured bindings, consider only real variables, not subobjects. */ && (DECL_DECOMPOSITION_P (decl) ? !DECL_DECOMP_BASE (decl) : (DECL_NAME (decl) && !DECL_ARTIFICIAL (decl))) && type != error_mark_node && (!CLASS_TYPE_P (type) || !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type) || lookup_attribute ("warn_unused", TYPE_ATTRIBUTES (TREE_TYPE (decl))))) { if (! TREE_USED (decl)) { if (!DECL_NAME (decl) && DECL_DECOMPOSITION_P (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_variable, "unused structured binding declaration"); else warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_variable, "unused variable %qD", decl); } else if (DECL_CONTEXT (decl) == current_function_decl // For -Wunused-but-set-variable leave references alone. && !TYPE_REF_P (TREE_TYPE (decl)) && errorcount == unused_but_set_errorcount) { if (!DECL_NAME (decl) && DECL_DECOMPOSITION_P (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_but_set_variable, "structured " "binding declaration set but not used"); else warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_but_set_variable, "variable %qD set but not used", decl); unused_but_set_errorcount = errorcount; } } } /* Remove declarations for all the DECLs in this level. */ for (link = decls; link; link = TREE_CHAIN (link)) { decl = TREE_CODE (link) == TREE_LIST ? TREE_VALUE (link) : link; tree name = OVL_NAME (decl); /* Remove the binding. 
*/ if (TREE_CODE (decl) == LABEL_DECL) pop_local_label (name, decl); else pop_local_binding (name, decl); } /* Restore the IDENTIFIER_TYPE_VALUEs. */ for (link = current_binding_level->type_shadowed; link; link = TREE_CHAIN (link)) SET_IDENTIFIER_TYPE_VALUE (TREE_PURPOSE (link), TREE_VALUE (link)); /* There may be OVERLOADs (wrapped in TREE_LISTs) on the BLOCK_VARs list if a `using' declaration put them there. The debugging back ends won't understand OVERLOAD, so we remove them here. Because the BLOCK_VARS are (temporarily) shared with CURRENT_BINDING_LEVEL->NAMES we must do this fixup after we have popped all the bindings. Also remove undeduced 'auto' decls, which LTO doesn't understand, and can't have been used by anything. */ if (block) { tree* d; for (d = &BLOCK_VARS (block); *d; ) { if (TREE_CODE (*d) == TREE_LIST || (!processing_template_decl && undeduced_auto_decl (*d))) *d = TREE_CHAIN (*d); else d = &DECL_CHAIN (*d); } } /* If the level being exited is the top level of a function, check over all the labels. */ if (functionbody) { if (block) { /* Since this is the top level block of a function, the vars are the function's parameters. Don't leave them in the BLOCK because they are found in the FUNCTION_DECL instead. */ BLOCK_VARS (block) = 0; pop_labels (block); } else pop_labels (subblocks); } kind = current_binding_level->kind; if (kind == sk_cleanup) { tree stmt; /* If this is a temporary binding created for a cleanup, then we'll have pushed a statement list level. Pop that, create a new BIND_EXPR for the block, and insert it into the stream. */ stmt = pop_stmt_list (current_binding_level->statement_list); stmt = c_build_bind_expr (input_location, block, stmt); add_stmt (stmt); } leave_scope (); if (functionbody) { /* The current function is being defined, so its DECL_INITIAL should be error_mark_node. */ gcc_assert (DECL_INITIAL (current_function_decl) == error_mark_node); DECL_INITIAL (current_function_decl) = block ? 
block : subblocks; if (subblocks) { if (FUNCTION_NEEDS_BODY_BLOCK (current_function_decl)) { if (BLOCK_SUBBLOCKS (subblocks)) BLOCK_OUTER_CURLY_BRACE_P (BLOCK_SUBBLOCKS (subblocks)) = 1; } else BLOCK_OUTER_CURLY_BRACE_P (subblocks) = 1; } } else if (block) current_binding_level->blocks = block_chainon (current_binding_level->blocks, block); /* If we did not make a block for the level just exited, any blocks made for inner levels (since they cannot be recorded as subblocks in that level) must be carried forward so they will later become subblocks of something else. */ else if (subblocks) current_binding_level->blocks = block_chainon (current_binding_level->blocks, subblocks); /* Each and every BLOCK node created here in `poplevel' is important (e.g. for proper debugging information) so if we created one earlier, mark it as "used". */ if (block) TREE_USED (block) = 1; /* All temporary bindings created for cleanups are popped silently. */ if (kind == sk_cleanup) goto restart; timevar_cond_stop (TV_NAME_LOOKUP, subtime); return block; } /* Call wrapup_globals_declarations for the globals in NAMESPACE. */ /* Diagnose odr-used extern inline variables without definitions in the current TU. */ int wrapup_namespace_globals () { if (vec<tree, va_gc> *statics = static_decls) { tree decl; unsigned int i; FOR_EACH_VEC_ELT (*statics, i, decl) { if (warn_unused_function && TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) == 0 && DECL_EXTERNAL (decl) && !TREE_PUBLIC (decl) && !DECL_ARTIFICIAL (decl) && !DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl) && !TREE_NO_WARNING (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_function, "%qF declared %<static%> but never defined", decl); if (VAR_P (decl) && DECL_EXTERNAL (decl) && DECL_INLINE_VAR_P (decl) && DECL_ODR_USED (decl)) error_at (DECL_SOURCE_LOCATION (decl), "odr-used inline variable %qD is not defined", decl); } /* Clear out the list, so we don't rescan next time. 
*/ static_decls = NULL; /* Write out any globals that need to be output. */ return wrapup_global_declarations (statics->address (), statics->length ()); } return 0; } /* In C++, you don't have to write `struct S' to refer to `S'; you can just use `S'. We accomplish this by creating a TYPE_DECL as if the user had written `typedef struct S S'. Create and return the TYPE_DECL for TYPE. */ tree create_implicit_typedef (tree name, tree type) { tree decl; decl = build_decl (input_location, TYPE_DECL, name, type); DECL_ARTIFICIAL (decl) = 1; /* There are other implicit type declarations, like the one *within* a class that allows you to write `S::S'. We must distinguish amongst these. */ SET_DECL_IMPLICIT_TYPEDEF_P (decl); TYPE_NAME (type) = decl; TYPE_STUB_DECL (type) = decl; return decl; } /* Function-scope local entities that need discriminators. Each entry is a {decl,name} pair. VAR_DECLs for anon unions get their name smashed, so we cannot rely on DECL_NAME. */ static GTY((deletable)) vec<tree, va_gc> *local_entities; /* Determine the mangling discriminator of local DECL. There are generally very few of these in any particular function. */ void determine_local_discriminator (tree decl) { bool subtime = timevar_cond_start (TV_NAME_LOOKUP); retrofit_lang_decl (decl); tree ctx = DECL_CONTEXT (decl); tree name = (TREE_CODE (decl) == TYPE_DECL && TYPE_UNNAMED_P (TREE_TYPE (decl)) ? NULL_TREE : DECL_NAME (decl)); size_t nelts = vec_safe_length (local_entities); for (size_t i = 0; i < nelts; i += 2) { tree *pair = &(*local_entities)[i]; tree d = pair[0]; tree n = pair[1]; gcc_checking_assert (d != decl); if (name == n && TREE_CODE (decl) == TREE_CODE (d) && ctx == DECL_CONTEXT (d)) { tree disc = integer_one_node; if (DECL_DISCRIMINATOR (d)) disc = build_int_cst (TREE_TYPE (disc), TREE_INT_CST_LOW (DECL_DISCRIMINATOR (d)) + 1); DECL_DISCRIMINATOR (decl) = disc; /* Replace the saved decl. 
*/ pair[0] = decl; decl = NULL_TREE; break; } } if (decl) { vec_safe_reserve (local_entities, 2); local_entities->quick_push (decl); local_entities->quick_push (name); } timevar_cond_stop (TV_NAME_LOOKUP, subtime); } /* Returns true if functions FN1 and FN2 have equivalent trailing requires clauses. */ static bool function_requirements_equivalent_p (tree newfn, tree oldfn) { /* In the concepts TS, the combined constraints are compared. */ if (cxx_dialect < cxx2a) { tree ci1 = get_constraints (oldfn); tree ci2 = get_constraints (newfn); tree req1 = ci1 ? CI_ASSOCIATED_CONSTRAINTS (ci1) : NULL_TREE; tree req2 = ci2 ? CI_ASSOCIATED_CONSTRAINTS (ci2) : NULL_TREE; return cp_tree_equal (req1, req2); } /* Compare only trailing requirements. */ tree reqs1 = get_trailing_function_requirements (newfn); tree reqs2 = get_trailing_function_requirements (oldfn); if ((reqs1 != NULL_TREE) != (reqs2 != NULL_TREE)) return false; reqs1 = maybe_substitute_reqs_for (reqs1, newfn); reqs2 = maybe_substitute_reqs_for (reqs2, oldfn); return cp_tree_equal (reqs1, reqs2); } /* Subroutine of duplicate_decls: return truthvalue of whether or not types of these decls match. For C++, we must compare the parameter list so that `int' can match `int&' in a parameter position, but `int&' is not confused with `const int&'. */ int decls_match (tree newdecl, tree olddecl, bool record_versions /* = true */) { int types_match; if (newdecl == olddecl) return 1; if (TREE_CODE (newdecl) != TREE_CODE (olddecl)) /* If the two DECLs are not even the same kind of thing, we're not interested in their types. */ return 0; gcc_assert (DECL_P (newdecl)); if (TREE_CODE (newdecl) == FUNCTION_DECL) { tree f1 = TREE_TYPE (newdecl); tree f2 = TREE_TYPE (olddecl); tree p1 = TYPE_ARG_TYPES (f1); tree p2 = TYPE_ARG_TYPES (f2); tree r2; /* Specializations of different templates are different functions even if they have the same type. */ tree t1 = (DECL_USE_TEMPLATE (newdecl) ? 
DECL_TI_TEMPLATE (newdecl) : NULL_TREE); tree t2 = (DECL_USE_TEMPLATE (olddecl) ? DECL_TI_TEMPLATE (olddecl) : NULL_TREE); if (t1 != t2) return 0; if (CP_DECL_CONTEXT (newdecl) != CP_DECL_CONTEXT (olddecl) && ! (DECL_EXTERN_C_P (newdecl) && DECL_EXTERN_C_P (olddecl))) return 0; /* A new declaration doesn't match a built-in one unless it is also extern "C". */ if (DECL_IS_BUILTIN (olddecl) && DECL_EXTERN_C_P (olddecl) && !DECL_EXTERN_C_P (newdecl)) return 0; if (TREE_CODE (f1) != TREE_CODE (f2)) return 0; /* A declaration with deduced return type should use its pre-deduction type for declaration matching. */ r2 = fndecl_declared_return_type (olddecl); if (same_type_p (TREE_TYPE (f1), r2)) { if (!prototype_p (f2) && DECL_EXTERN_C_P (olddecl) && fndecl_built_in_p (olddecl)) { types_match = self_promoting_args_p (p1); if (p1 == void_list_node) TREE_TYPE (newdecl) = TREE_TYPE (olddecl); } else types_match = compparms (p1, p2) && type_memfn_rqual (f1) == type_memfn_rqual (f2) && (TYPE_ATTRIBUTES (TREE_TYPE (newdecl)) == NULL_TREE || comp_type_attributes (TREE_TYPE (newdecl), TREE_TYPE (olddecl)) != 0); } else types_match = 0; /* Two function declarations match if either has a requires-clause then both have a requires-clause and their constraints-expressions are equivalent. */ if (types_match && flag_concepts) types_match = function_requirements_equivalent_p (newdecl, olddecl); /* The decls dont match if they correspond to two different versions of the same function. Disallow extern "C" functions to be versions for now. 
*/ if (types_match && !DECL_EXTERN_C_P (newdecl) && !DECL_EXTERN_C_P (olddecl) && record_versions && maybe_version_functions (newdecl, olddecl, (!DECL_FUNCTION_VERSIONED (newdecl) || !DECL_FUNCTION_VERSIONED (olddecl)))) return 0; } else if (TREE_CODE (newdecl) == TEMPLATE_DECL) { if (!template_heads_equivalent_p (newdecl, olddecl)) return 0; tree oldres = DECL_TEMPLATE_RESULT (olddecl); tree newres = DECL_TEMPLATE_RESULT (newdecl); if (TREE_CODE (newres) != TREE_CODE (oldres)) return 0; /* Two template types match if they are the same. Otherwise, compare the underlying declarations. */ if (TREE_CODE (newres) == TYPE_DECL) types_match = same_type_p (TREE_TYPE (newres), TREE_TYPE (oldres)); else types_match = decls_match (newres, oldres); } else { /* Need to check scope for variable declaration (VAR_DECL). For typedef (TYPE_DECL), scope is ignored. */ if (VAR_P (newdecl) && CP_DECL_CONTEXT (newdecl) != CP_DECL_CONTEXT (olddecl) /* [dcl.link] Two declarations for an object with C language linkage with the same name (ignoring the namespace that qualify it) that appear in different namespace scopes refer to the same object. */ && !(DECL_EXTERN_C_P (olddecl) && DECL_EXTERN_C_P (newdecl))) return 0; if (TREE_TYPE (newdecl) == error_mark_node) types_match = TREE_TYPE (olddecl) == error_mark_node; else if (TREE_TYPE (olddecl) == NULL_TREE) types_match = TREE_TYPE (newdecl) == NULL_TREE; else if (TREE_TYPE (newdecl) == NULL_TREE) types_match = 0; else types_match = comptypes (TREE_TYPE (newdecl), TREE_TYPE (olddecl), COMPARE_REDECLARATION); } return types_match; } /* NEWDECL and OLDDECL have identical signatures. If they are different versions adjust them and return true. If RECORD is set to true, record function versions. 
*/ bool maybe_version_functions (tree newdecl, tree olddecl, bool record) { if (!targetm.target_option.function_versions (newdecl, olddecl)) return false; if (!DECL_FUNCTION_VERSIONED (olddecl)) { DECL_FUNCTION_VERSIONED (olddecl) = 1; if (DECL_ASSEMBLER_NAME_SET_P (olddecl)) mangle_decl (olddecl); } if (!DECL_FUNCTION_VERSIONED (newdecl)) { DECL_FUNCTION_VERSIONED (newdecl) = 1; if (DECL_ASSEMBLER_NAME_SET_P (newdecl)) mangle_decl (newdecl); } if (record) cgraph_node::record_function_versions (olddecl, newdecl); return true; } /* If NEWDECL is `static' and an `extern' was seen previously, warn about it. OLDDECL is the previous declaration. Note that this does not apply to the C++ case of declaring a variable `extern const' and then later `const'. Don't complain about built-in functions, since they are beyond the user's control. */ void warn_extern_redeclared_static (tree newdecl, tree olddecl) { if (TREE_CODE (newdecl) == TYPE_DECL || TREE_CODE (newdecl) == TEMPLATE_DECL || TREE_CODE (newdecl) == CONST_DECL || TREE_CODE (newdecl) == NAMESPACE_DECL) return; /* Don't get confused by static member functions; that's a different use of `static'. */ if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_STATIC_FUNCTION_P (newdecl)) return; /* If the old declaration was `static', or the new one isn't, then everything is OK. */ if (DECL_THIS_STATIC (olddecl) || !DECL_THIS_STATIC (newdecl)) return; /* It's OK to declare a builtin function as `static'. */ if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_ARTIFICIAL (olddecl)) return; auto_diagnostic_group d; if (permerror (DECL_SOURCE_LOCATION (newdecl), "%qD was declared %<extern%> and later %<static%>", newdecl)) inform (DECL_SOURCE_LOCATION (olddecl), "previous declaration of %qD", olddecl); } /* NEW_DECL is a redeclaration of OLD_DECL; both are functions or function templates. If their exception specifications do not match, issue a diagnostic. 
/* NEW_DECL redeclares OLD_DECL; diagnose any mismatch between their
   exception specifications per [except.spec].  Emits a pedwarn or an
   error (depending on context) and a follow-up note; returns nothing.  */

static void
check_redeclaration_exception_specification (tree new_decl,
					     tree old_decl)
{
  tree new_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (new_decl));
  tree old_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (old_decl));

  /* Two default specs are equivalent, don't force evaluation.  */
  if (UNEVALUATED_NOEXCEPT_SPEC_P (new_exceptions)
      && UNEVALUATED_NOEXCEPT_SPEC_P (old_exceptions))
    return;

  /* Instantiate any deferred noexcept-specifications before comparing,
     but only when OLD_DECL is not type-dependent.  */
  if (!type_dependent_expression_p (old_decl))
    {
      maybe_instantiate_noexcept (new_decl);
      maybe_instantiate_noexcept (old_decl);
    }
  /* Re-fetch: the instantiation above may have replaced the function
     types' exception specifications.  */
  new_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (new_decl));
  old_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (old_decl));

  /* [except.spec]

     If any declaration of a function has an exception-specification,
     all declarations, including the definition and an explicit
     specialization, of that function shall have an
     exception-specification with the same set of type-ids.  */
  if (! DECL_IS_BUILTIN (old_decl)
      && !comp_except_specs (new_exceptions, old_exceptions, ce_normal))
    {
      const char *const msg
	= G_("declaration of %qF has a different exception specifier");
      bool complained = true;
      location_t new_loc = DECL_SOURCE_LOCATION (new_decl);
      auto_diagnostic_group d;
      /* Pick the diagnostic severity: system headers get a suppressible
	 pedwarn, -fno-exceptions gets a pedantic warning, everything
	 else is a hard error.  */
      if (DECL_IN_SYSTEM_HEADER (old_decl))
	complained = pedwarn (new_loc, OPT_Wsystem_headers, msg, new_decl);
      else if (!flag_exceptions)
	/* We used to silently permit mismatched eh specs with
	   -fno-exceptions, so make them a pedwarn now.  */
	complained = pedwarn (new_loc, OPT_Wpedantic, msg, new_decl);
      else
	error_at (new_loc, msg, new_decl);
      /* Only point at the previous declaration if we actually issued
	 something above (pedwarn may be suppressed).  */
      if (complained)
	inform (DECL_SOURCE_LOCATION (old_decl),
		"from previous declaration %qF", old_decl);
    }
}

/* Return true if OLD_DECL and NEW_DECL agree on constexprness.
   Otherwise issue diagnostics.  */
*/ static bool validate_constexpr_redeclaration (tree old_decl, tree new_decl) { old_decl = STRIP_TEMPLATE (old_decl); new_decl = STRIP_TEMPLATE (new_decl); if (!VAR_OR_FUNCTION_DECL_P (old_decl) || !VAR_OR_FUNCTION_DECL_P (new_decl)) return true; if (DECL_DECLARED_CONSTEXPR_P (old_decl) == DECL_DECLARED_CONSTEXPR_P (new_decl)) { if (TREE_CODE (old_decl) != FUNCTION_DECL) return true; if (DECL_IMMEDIATE_FUNCTION_P (old_decl) == DECL_IMMEDIATE_FUNCTION_P (new_decl)) return true; } if (TREE_CODE (old_decl) == FUNCTION_DECL) { if (fndecl_built_in_p (old_decl)) { /* Hide a built-in declaration. */ DECL_DECLARED_CONSTEXPR_P (old_decl) = DECL_DECLARED_CONSTEXPR_P (new_decl); if (DECL_IMMEDIATE_FUNCTION_P (new_decl)) SET_DECL_IMMEDIATE_FUNCTION_P (old_decl); return true; } /* 7.1.5 [dcl.constexpr] Note: An explicit specialization can differ from the template declaration with respect to the constexpr specifier. */ if (! DECL_TEMPLATE_SPECIALIZATION (old_decl) && DECL_TEMPLATE_SPECIALIZATION (new_decl)) return true; const char *kind = "constexpr"; if (DECL_IMMEDIATE_FUNCTION_P (old_decl) || DECL_IMMEDIATE_FUNCTION_P (new_decl)) kind = "consteval"; error_at (DECL_SOURCE_LOCATION (new_decl), "redeclaration %qD differs in %qs " "from previous declaration", new_decl, kind); inform (DECL_SOURCE_LOCATION (old_decl), "previous declaration %qD", old_decl); return false; } return true; } // If OLDDECL and NEWDECL are concept declarations with the same type // (i.e., and template parameters), but different requirements, // emit diagnostics and return true. Otherwise, return false. 
static inline bool check_concept_refinement (tree olddecl, tree newdecl) { if (!DECL_DECLARED_CONCEPT_P (olddecl) || !DECL_DECLARED_CONCEPT_P (newdecl)) return false; tree d1 = DECL_TEMPLATE_RESULT (olddecl); tree d2 = DECL_TEMPLATE_RESULT (newdecl); if (TREE_CODE (d1) != TREE_CODE (d2)) return false; tree t1 = TREE_TYPE (d1); tree t2 = TREE_TYPE (d2); if (TREE_CODE (d1) == FUNCTION_DECL) { if (compparms (TYPE_ARG_TYPES (t1), TYPE_ARG_TYPES (t2)) && comp_template_parms (DECL_TEMPLATE_PARMS (olddecl), DECL_TEMPLATE_PARMS (newdecl)) && !equivalently_constrained (olddecl, newdecl)) { error ("cannot specialize concept %q#D", olddecl); return true; } } return false; } /* DECL is a redeclaration of a function or function template. If it does have default arguments issue a diagnostic. Note: this function is used to enforce the requirements in C++11 8.3.6 about no default arguments in redeclarations. */ static void check_redeclaration_no_default_args (tree decl) { gcc_assert (DECL_DECLARES_FUNCTION_P (decl)); for (tree t = FUNCTION_FIRST_USER_PARMTYPE (decl); t && t != void_list_node; t = TREE_CHAIN (t)) if (TREE_PURPOSE (t)) { permerror (DECL_SOURCE_LOCATION (decl), "redeclaration of %q#D may not have default " "arguments", decl); return; } } /* NEWDECL is a redeclaration of a function or function template OLDDECL, in any case represented as FUNCTION_DECLs (the DECL_TEMPLATE_RESULTs of the TEMPLATE_DECLs in case of function templates). This function is used to enforce the final part of C++17 11.3.6/4, about a single declaration: "If a friend declaration specifies a default argument expression, that declaration shall be a definition and shall be the only declaration of the function or function template in the translation unit." 
/* Diagnose a redeclaration where a friend declaration carried default
   arguments; see the comment preceding this function for the C++17
   11.3.6/4 wording being enforced.  OLDDECL_HIDDEN_FRIEND_P says
   whether OLDDECL was a hidden friend.  */

static void
check_no_redeclaration_friend_default_args (tree olddecl, tree newdecl,
					    bool olddecl_hidden_friend_p)
{
  /* Nothing to enforce unless one of the declarations is a friend.  */
  if (!olddecl_hidden_friend_p && !DECL_FRIEND_P (newdecl))
    return;

  /* Walk the two parameter-type lists in parallel; a non-null
     TREE_PURPOSE marks a default argument.  NOTE(review): T2 is
     advanced in lockstep with T1 without its own null check —
     presumably the decls already matched so the lists have equal
     length; confirm against the caller.  */
  tree t1 = FUNCTION_FIRST_USER_PARMTYPE (olddecl);
  tree t2 = FUNCTION_FIRST_USER_PARMTYPE (newdecl);
  for (; t1 && t1 != void_list_node;
       t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2))
    if ((olddecl_hidden_friend_p && TREE_PURPOSE (t1))
	|| (DECL_FRIEND_P (newdecl) && TREE_PURPOSE (t2)))
      {
	auto_diagnostic_group d;
	if (permerror (DECL_SOURCE_LOCATION (newdecl),
		       "friend declaration of %q#D specifies default "
		       "arguments and isn%'t the only declaration", newdecl))
	  inform (DECL_SOURCE_LOCATION (olddecl),
		  "previous declaration of %q#D", olddecl);
	/* One diagnostic per redeclaration is enough.  */
	return;
      }
}

/* Merge tree bits that correspond to attributes noreturn, nothrow,
   const, malloc, and pure from NEWDECL with those of OLDDECL.  */

static void
merge_attribute_bits (tree newdecl, tree olddecl)
{
  /* Each flag is OR-ed in both directions so NEWDECL and OLDDECL end
     up with the union of the two declarations' bits.  */
  TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
  TREE_THIS_VOLATILE (olddecl) |= TREE_THIS_VOLATILE (newdecl);
  TREE_NOTHROW (newdecl) |= TREE_NOTHROW (olddecl);
  TREE_NOTHROW (olddecl) |= TREE_NOTHROW (newdecl);
  TREE_READONLY (newdecl) |= TREE_READONLY (olddecl);
  TREE_READONLY (olddecl) |= TREE_READONLY (newdecl);
  DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl);
  DECL_IS_MALLOC (olddecl) |= DECL_IS_MALLOC (newdecl);
  DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl);
  DECL_PURE_P (olddecl) |= DECL_PURE_P (newdecl);
  DECL_UNINLINABLE (newdecl) |= DECL_UNINLINABLE (olddecl);
  DECL_UNINLINABLE (olddecl) |= DECL_UNINLINABLE (newdecl);
}

/* True iff FN was declared `inline' and carries the "gnu_inline"
   attribute.  */
#define GNU_INLINE_P(fn) (DECL_DECLARED_INLINE_P (fn) \
			  && lookup_attribute ("gnu_inline", \
					       DECL_ATTRIBUTES (fn)))

/* A subroutine of duplicate_decls. Emits a diagnostic when newdecl
   ambiguates olddecl.  Returns true if an error occurs.  */
*/ static bool duplicate_function_template_decls (tree newdecl, tree olddecl) { tree newres = DECL_TEMPLATE_RESULT (newdecl); tree oldres = DECL_TEMPLATE_RESULT (olddecl); /* Function template declarations can be differentiated by parameter and return type. */ if (compparms (TYPE_ARG_TYPES (TREE_TYPE (oldres)), TYPE_ARG_TYPES (TREE_TYPE (newres))) && same_type_p (TREE_TYPE (TREE_TYPE (newdecl)), TREE_TYPE (TREE_TYPE (olddecl)))) { /* ... and also by their template-heads and requires-clauses. */ if (template_heads_equivalent_p (newdecl, olddecl) && function_requirements_equivalent_p (newres, oldres)) { error ("ambiguating new declaration %q+#D", newdecl); inform (DECL_SOURCE_LOCATION (olddecl), "old declaration %q#D", olddecl); return true; } /* FIXME: The types are the same but the are differences in either the template heads or function requirements. We should be able to diagnose a set of common errors stemming from these declarations. For example: template<typename T> requires C void f(...); template<typename T> void f(...) requires C; These are functionally equivalent but not equivalent. */ } return false; } /* If NEWDECL is a redeclaration of OLDDECL, merge the declarations. If the redeclaration is invalid, a diagnostic is issued, and the error_mark_node is returned. Otherwise, OLDDECL is returned. If NEWDECL is not a redeclaration of OLDDECL, NULL_TREE is returned. NEWDECL_IS_FRIEND is true if NEWDECL was declared as a friend. 
*/ tree duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend) { unsigned olddecl_uid = DECL_UID (olddecl); int olddecl_friend = 0, types_match = 0, hidden_friend = 0; int olddecl_hidden_friend = 0; int new_defines_function = 0; tree new_template_info; location_t olddecl_loc = DECL_SOURCE_LOCATION (olddecl); location_t newdecl_loc = DECL_SOURCE_LOCATION (newdecl); if (newdecl == olddecl) return olddecl; types_match = decls_match (newdecl, olddecl); /* If either the type of the new decl or the type of the old decl is an error_mark_node, then that implies that we have already issued an error (earlier) for some bogus type specification, and in that case, it is rather pointless to harass the user with yet more error message about the same declaration, so just pretend the types match here. */ if (TREE_TYPE (newdecl) == error_mark_node || TREE_TYPE (olddecl) == error_mark_node) return error_mark_node; /* Check for redeclaration and other discrepancies. */ if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_ARTIFICIAL (olddecl) /* A C++20 implicit friend operator== uses the normal path (94462). */ && !DECL_HIDDEN_FRIEND_P (olddecl)) { if (TREE_CODE (newdecl) != FUNCTION_DECL) { /* Avoid warnings redeclaring built-ins which have not been explicitly declared. */ if (DECL_ANTICIPATED (olddecl)) { if (TREE_PUBLIC (newdecl) && CP_DECL_CONTEXT (newdecl) == global_namespace) warning_at (newdecl_loc, OPT_Wbuiltin_declaration_mismatch, "built-in function %qD declared as non-function", newdecl); return NULL_TREE; } /* If you declare a built-in or predefined function name as static, the old definition is overridden, but optionally warn this was a bad choice of name. */ if (! TREE_PUBLIC (newdecl)) { warning_at (newdecl_loc, OPT_Wshadow, fndecl_built_in_p (olddecl) ? G_("shadowing built-in function %q#D") : G_("shadowing library function %q#D"), olddecl); /* Discard the old built-in function. 
*/ return NULL_TREE; } /* If the built-in is not ansi, then programs can override it even globally without an error. */ else if (! fndecl_built_in_p (olddecl)) warning_at (newdecl_loc, 0, "library function %q#D redeclared as non-function %q#D", olddecl, newdecl); else error_at (newdecl_loc, "declaration of %q#D conflicts with built-in " "declaration %q#D", newdecl, olddecl); return NULL_TREE; } else if (DECL_OMP_DECLARE_REDUCTION_P (olddecl)) { gcc_assert (DECL_OMP_DECLARE_REDUCTION_P (newdecl)); error_at (newdecl_loc, "redeclaration of %<pragma omp declare reduction%>"); inform (olddecl_loc, "previous %<pragma omp declare reduction%> declaration"); return error_mark_node; } else if (!types_match) { /* Avoid warnings redeclaring built-ins which have not been explicitly declared. */ if (DECL_ANTICIPATED (olddecl)) { tree t1, t2; /* A new declaration doesn't match a built-in one unless it is also extern "C". */ gcc_assert (DECL_IS_BUILTIN (olddecl)); gcc_assert (DECL_EXTERN_C_P (olddecl)); if (!DECL_EXTERN_C_P (newdecl)) return NULL_TREE; for (t1 = TYPE_ARG_TYPES (TREE_TYPE (newdecl)), t2 = TYPE_ARG_TYPES (TREE_TYPE (olddecl)); t1 || t2; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2)) { if (!t1 || !t2) break; /* FILE, tm types are not known at the time we create the builtins. */ for (unsigned i = 0; i < sizeof (builtin_structptr_types) / sizeof (builtin_structptr_type); ++i) if (TREE_VALUE (t2) == builtin_structptr_types[i].node) { tree t = TREE_VALUE (t1); if (TYPE_PTR_P (t) && TYPE_IDENTIFIER (TREE_TYPE (t)) == get_identifier (builtin_structptr_types[i].str) && compparms (TREE_CHAIN (t1), TREE_CHAIN (t2))) { tree oldargs = TYPE_ARG_TYPES (TREE_TYPE (olddecl)); TYPE_ARG_TYPES (TREE_TYPE (olddecl)) = TYPE_ARG_TYPES (TREE_TYPE (newdecl)); types_match = decls_match (newdecl, olddecl); if (types_match) return duplicate_decls (newdecl, olddecl, newdecl_is_friend); TYPE_ARG_TYPES (TREE_TYPE (olddecl)) = oldargs; } goto next_arg; } if (! 
same_type_p (TREE_VALUE (t1), TREE_VALUE (t2))) break; next_arg:; } warning_at (newdecl_loc, OPT_Wbuiltin_declaration_mismatch, "declaration of %q#D conflicts with built-in " "declaration %q#D", newdecl, olddecl); } else if ((DECL_EXTERN_C_P (newdecl) && DECL_EXTERN_C_P (olddecl)) || compparms (TYPE_ARG_TYPES (TREE_TYPE (newdecl)), TYPE_ARG_TYPES (TREE_TYPE (olddecl)))) { /* Don't really override olddecl for __* prefixed builtins except for __[^b]*_chk, the compiler might be using those explicitly. */ if (fndecl_built_in_p (olddecl)) { tree id = DECL_NAME (olddecl); const char *name = IDENTIFIER_POINTER (id); size_t len; if (name[0] == '_' && name[1] == '_' && (strncmp (name + 2, "builtin_", strlen ("builtin_")) == 0 || (len = strlen (name)) <= strlen ("___chk") || memcmp (name + len - strlen ("_chk"), "_chk", strlen ("_chk") + 1) != 0)) { if (DECL_INITIAL (newdecl)) { error_at (newdecl_loc, "definition of %q#D ambiguates built-in " "declaration %q#D", newdecl, olddecl); return error_mark_node; } auto_diagnostic_group d; if (permerror (newdecl_loc, "new declaration %q#D ambiguates built-in" " declaration %q#D", newdecl, olddecl) && flag_permissive) inform (newdecl_loc, "ignoring the %q#D declaration", newdecl); return flag_permissive ? olddecl : error_mark_node; } } /* A near match; override the builtin. */ if (TREE_PUBLIC (newdecl)) warning_at (newdecl_loc, OPT_Wbuiltin_declaration_mismatch, "new declaration %q#D ambiguates built-in " "declaration %q#D", newdecl, olddecl); else warning (OPT_Wshadow, fndecl_built_in_p (olddecl) ? G_("shadowing built-in function %q#D") : G_("shadowing library function %q#D"), olddecl); } else /* Discard the old built-in function. */ return NULL_TREE; /* Replace the old RTL to avoid problems with inlining. */ COPY_DECL_RTL (newdecl, olddecl); } /* Even if the types match, prefer the new declarations type for built-ins which have not been explicitly declared, for exception lists, etc... 
*/ else if (DECL_IS_BUILTIN (olddecl)) { tree type = TREE_TYPE (newdecl); tree attribs = (*targetm.merge_type_attributes) (TREE_TYPE (olddecl), type); type = cp_build_type_attribute_variant (type, attribs); TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = type; } /* If a function is explicitly declared "throw ()", propagate that to the corresponding builtin. */ if (DECL_BUILT_IN_CLASS (olddecl) == BUILT_IN_NORMAL && DECL_ANTICIPATED (olddecl) && TREE_NOTHROW (newdecl) && !TREE_NOTHROW (olddecl)) { enum built_in_function fncode = DECL_FUNCTION_CODE (olddecl); tree tmpdecl = builtin_decl_explicit (fncode); if (tmpdecl && tmpdecl != olddecl && types_match) TREE_NOTHROW (tmpdecl) = 1; } /* Whether or not the builtin can throw exceptions has no bearing on this declarator. */ TREE_NOTHROW (olddecl) = 0; if (DECL_THIS_STATIC (newdecl) && !DECL_THIS_STATIC (olddecl)) { /* If a builtin function is redeclared as `static', merge the declarations, but make the original one static. */ DECL_THIS_STATIC (olddecl) = 1; TREE_PUBLIC (olddecl) = 0; /* Make the old declaration consistent with the new one so that all remnants of the builtin-ness of this function will be banished. */ SET_DECL_LANGUAGE (olddecl, DECL_LANGUAGE (newdecl)); COPY_DECL_RTL (newdecl, olddecl); } } else if (TREE_CODE (olddecl) != TREE_CODE (newdecl)) { /* C++ Standard, 3.3, clause 4: "[Note: a namespace name or a class template name must be unique in its declarative region (7.3.2, clause 14). ]" */ if (TREE_CODE (olddecl) == NAMESPACE_DECL || TREE_CODE (newdecl) == NAMESPACE_DECL) /* Namespace conflicts with not namespace. */; else if (DECL_TYPE_TEMPLATE_P (olddecl) || DECL_TYPE_TEMPLATE_P (newdecl)) /* Class template conflicts. */; else if ((TREE_CODE (newdecl) == FUNCTION_DECL && DECL_FUNCTION_TEMPLATE_P (olddecl)) || (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_FUNCTION_TEMPLATE_P (newdecl))) { /* One is a function and the other is a template function. 
*/ if (!UDLIT_OPER_P (DECL_NAME (newdecl))) return NULL_TREE; /* There can only be one! */ if (TREE_CODE (newdecl) == TEMPLATE_DECL && check_raw_literal_operator (olddecl)) error_at (newdecl_loc, "literal operator %q#D conflicts with" " raw literal operator", newdecl); else if (check_raw_literal_operator (newdecl)) error_at (newdecl_loc, "raw literal operator %q#D conflicts with" " literal operator template", newdecl); else return NULL_TREE; inform (olddecl_loc, "previous declaration %q#D", olddecl); return error_mark_node; } else if (DECL_IMPLICIT_TYPEDEF_P (olddecl) || DECL_IMPLICIT_TYPEDEF_P (newdecl)) /* One is an implicit typedef, that's ok. */ return NULL_TREE; error ("%q#D redeclared as different kind of entity", newdecl); inform (olddecl_loc, "previous declaration %q#D", olddecl); return error_mark_node; } else if (!types_match) { if (CP_DECL_CONTEXT (newdecl) != CP_DECL_CONTEXT (olddecl)) /* These are certainly not duplicate declarations; they're from different scopes. */ return NULL_TREE; if (TREE_CODE (newdecl) == TEMPLATE_DECL) { tree oldres = DECL_TEMPLATE_RESULT (olddecl); tree newres = DECL_TEMPLATE_RESULT (newdecl); /* The name of a class template may not be declared to refer to any other template, class, function, object, namespace, value, or type in the same scope. 
*/ if (TREE_CODE (oldres) == TYPE_DECL || TREE_CODE (newres) == TYPE_DECL) { error_at (newdecl_loc, "conflicting declaration of template %q#D", newdecl); inform (olddecl_loc, "previous declaration %q#D", olddecl); return error_mark_node; } else if (TREE_CODE (oldres) == FUNCTION_DECL && TREE_CODE (newres) == FUNCTION_DECL) { if (duplicate_function_template_decls (newdecl, olddecl)) return error_mark_node; return NULL_TREE; } else if (check_concept_refinement (olddecl, newdecl)) return error_mark_node; return NULL_TREE; } if (TREE_CODE (newdecl) == FUNCTION_DECL) { if (DECL_EXTERN_C_P (newdecl) && DECL_EXTERN_C_P (olddecl)) { error_at (newdecl_loc, "conflicting declaration of C function %q#D", newdecl); inform (olddecl_loc, "previous declaration %q#D", olddecl); return NULL_TREE; } /* For function versions, params and types match, but they are not ambiguous. */ else if ((!DECL_FUNCTION_VERSIONED (newdecl) && !DECL_FUNCTION_VERSIONED (olddecl)) // The functions have the same parameter types. && compparms (TYPE_ARG_TYPES (TREE_TYPE (newdecl)), TYPE_ARG_TYPES (TREE_TYPE (olddecl))) // And the same constraints. 
&& equivalently_constrained (newdecl, olddecl)) { error_at (newdecl_loc, "ambiguating new declaration of %q#D", newdecl); inform (olddecl_loc, "old declaration %q#D", olddecl); return error_mark_node; } else return NULL_TREE; } else { error_at (newdecl_loc, "conflicting declaration %q#D", newdecl); inform (olddecl_loc, "previous declaration as %q#D", olddecl); return error_mark_node; } } else if (TREE_CODE (newdecl) == FUNCTION_DECL && ((DECL_TEMPLATE_SPECIALIZATION (olddecl) && (!DECL_TEMPLATE_INFO (newdecl) || (DECL_TI_TEMPLATE (newdecl) != DECL_TI_TEMPLATE (olddecl)))) || (DECL_TEMPLATE_SPECIALIZATION (newdecl) && (!DECL_TEMPLATE_INFO (olddecl) || (DECL_TI_TEMPLATE (olddecl) != DECL_TI_TEMPLATE (newdecl)))))) /* It's OK to have a template specialization and a non-template with the same type, or to have specializations of two different templates with the same type. Note that if one is a specialization, and the other is an instantiation of the same template, that we do not exit at this point. That situation can occur if we instantiate a template class, and then specialize one of its methods. This situation is valid, but the declarations must be merged in the usual way. */ return NULL_TREE; else if (TREE_CODE (newdecl) == FUNCTION_DECL && ((DECL_TEMPLATE_INSTANTIATION (olddecl) && !DECL_USE_TEMPLATE (newdecl)) || (DECL_TEMPLATE_INSTANTIATION (newdecl) && !DECL_USE_TEMPLATE (olddecl)))) /* One of the declarations is a template instantiation, and the other is not a template at all. That's OK. */ return NULL_TREE; else if (TREE_CODE (newdecl) == NAMESPACE_DECL) { /* In [namespace.alias] we have: In a declarative region, a namespace-alias-definition can be used to redefine a namespace-alias declared in that declarative region to refer only to the namespace to which it already refers. Therefore, if we encounter a second alias directive for the same alias, we can just ignore the second directive. 
*/ if (DECL_NAMESPACE_ALIAS (newdecl) && (DECL_NAMESPACE_ALIAS (newdecl) == DECL_NAMESPACE_ALIAS (olddecl))) return olddecl; /* Leave it to update_binding to merge or report error. */ return NULL_TREE; } else { const char *errmsg = redeclaration_error_message (newdecl, olddecl); if (errmsg) { auto_diagnostic_group d; error_at (newdecl_loc, errmsg, newdecl); if (DECL_NAME (olddecl) != NULL_TREE) inform (olddecl_loc, (DECL_INITIAL (olddecl) && namespace_bindings_p ()) ? G_("%q#D previously defined here") : G_("%q#D previously declared here"), olddecl); return error_mark_node; } else if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_INITIAL (olddecl) != NULL_TREE && !prototype_p (TREE_TYPE (olddecl)) && prototype_p (TREE_TYPE (newdecl))) { /* Prototype decl follows defn w/o prototype. */ auto_diagnostic_group d; if (warning_at (newdecl_loc, 0, "prototype specified for %q#D", newdecl)) inform (olddecl_loc, "previous non-prototype definition here"); } else if (VAR_OR_FUNCTION_DECL_P (olddecl) && DECL_LANGUAGE (newdecl) != DECL_LANGUAGE (olddecl)) { /* [dcl.link] If two declarations of the same function or object specify different linkage-specifications ..., the program is ill-formed.... Except for functions with C++ linkage, a function declaration without a linkage specification shall not precede the first linkage specification for that function. A function can be declared without a linkage specification after an explicit linkage specification has been seen; the linkage explicitly specified in the earlier declaration is not affected by such a function declaration. DR 563 raises the question why the restrictions on functions should not also apply to objects. Older versions of G++ silently ignore the linkage-specification for this example: namespace N { extern int i; extern "C" int i; } which is clearly wrong. Therefore, we now treat objects like functions. 
*/ if (current_lang_depth () == 0) { /* There is no explicit linkage-specification, so we use the linkage from the previous declaration. */ retrofit_lang_decl (newdecl); SET_DECL_LANGUAGE (newdecl, DECL_LANGUAGE (olddecl)); } else { auto_diagnostic_group d; error_at (newdecl_loc, "conflicting declaration of %q#D with %qL linkage", newdecl, DECL_LANGUAGE (newdecl)); inform (olddecl_loc, "previous declaration with %qL linkage", DECL_LANGUAGE (olddecl)); } } if (DECL_LANG_SPECIFIC (olddecl) && DECL_USE_TEMPLATE (olddecl)) ; else if (TREE_CODE (olddecl) == FUNCTION_DECL) { /* Note: free functions, as TEMPLATE_DECLs, are handled below. */ if (DECL_FUNCTION_MEMBER_P (olddecl) && (/* grokfndecl passes member function templates too as FUNCTION_DECLs. */ DECL_TEMPLATE_INFO (olddecl) /* C++11 8.3.6/6. Default arguments for a member function of a class template shall be specified on the initial declaration of the member function within the class template. */ || CLASSTYPE_TEMPLATE_INFO (CP_DECL_CONTEXT (olddecl)))) check_redeclaration_no_default_args (newdecl); else { tree t1 = FUNCTION_FIRST_USER_PARMTYPE (olddecl); tree t2 = FUNCTION_FIRST_USER_PARMTYPE (newdecl); int i = 1; for (; t1 && t1 != void_list_node; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2), i++) if (TREE_PURPOSE (t1) && TREE_PURPOSE (t2)) { if (simple_cst_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2)) == 1) { auto_diagnostic_group d; if (permerror (newdecl_loc, "default argument given for parameter " "%d of %q#D", i, newdecl)) inform (olddecl_loc, "previous specification in %q#D here", olddecl); } else { auto_diagnostic_group d; error_at (newdecl_loc, "default argument given for parameter %d " "of %q#D", i, newdecl); inform (olddecl_loc, "previous specification in %q#D here", olddecl); } } /* C++17 11.3.6/4: "If a friend declaration specifies a default argument expression, that declaration... shall be the only declaration of the function or function template in the translation unit." 
*/ check_no_redeclaration_friend_default_args (olddecl, newdecl, DECL_HIDDEN_FRIEND_P (olddecl)); } } } /* Do not merge an implicit typedef with an explicit one. In: class A; ... typedef class A A __attribute__ ((foo)); the attribute should apply only to the typedef. */ if (TREE_CODE (olddecl) == TYPE_DECL && (DECL_IMPLICIT_TYPEDEF_P (olddecl) || DECL_IMPLICIT_TYPEDEF_P (newdecl))) return NULL_TREE; if (!validate_constexpr_redeclaration (olddecl, newdecl)) return error_mark_node; /* We have committed to returning OLDDECL at this point. */ /* If new decl is `static' and an `extern' was seen previously, warn about it. */ warn_extern_redeclared_static (newdecl, olddecl); /* True to merge attributes between the declarations, false to set OLDDECL's attributes to those of NEWDECL (for template explicit specializations that specify their own attributes independent of those specified for the primary template). */ const bool merge_attr = (TREE_CODE (newdecl) != FUNCTION_DECL || !DECL_TEMPLATE_SPECIALIZATION (newdecl) || DECL_TEMPLATE_SPECIALIZATION (olddecl)); if (TREE_CODE (newdecl) == FUNCTION_DECL) { if (merge_attr) { if (diagnose_mismatched_attributes (olddecl, newdecl)) inform (olddecl_loc, DECL_INITIAL (olddecl) ? G_("previous definition of %qD here") : G_("previous declaration of %qD here"), olddecl); /* [dcl.attr.noreturn]: The first declaration of a function shall specify the noreturn attribute if any declaration of that function specifies the noreturn attribute. */ tree a; if (TREE_THIS_VOLATILE (newdecl) && !TREE_THIS_VOLATILE (olddecl) /* This applies to [[noreturn]] only, not its GNU variants. 
*/ && (a = lookup_attribute ("noreturn", DECL_ATTRIBUTES (newdecl))) && cxx11_attribute_p (a) && get_attribute_namespace (a) == NULL_TREE) { error_at (newdecl_loc, "function %qD declared %<[[noreturn]]%> " "but its first declaration was not", newdecl); inform (olddecl_loc, "previous declaration of %qD", olddecl); } } /* Now that functions must hold information normally held by field decls, there is extra work to do so that declaration information does not get destroyed during definition. */ if (DECL_VINDEX (olddecl)) DECL_VINDEX (newdecl) = DECL_VINDEX (olddecl); if (DECL_CONTEXT (olddecl)) DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl); DECL_STATIC_CONSTRUCTOR (newdecl) |= DECL_STATIC_CONSTRUCTOR (olddecl); DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl); DECL_PURE_VIRTUAL_P (newdecl) |= DECL_PURE_VIRTUAL_P (olddecl); DECL_VIRTUAL_P (newdecl) |= DECL_VIRTUAL_P (olddecl); DECL_INVALID_OVERRIDER_P (newdecl) |= DECL_INVALID_OVERRIDER_P (olddecl); DECL_FINAL_P (newdecl) |= DECL_FINAL_P (olddecl); DECL_OVERRIDE_P (newdecl) |= DECL_OVERRIDE_P (olddecl); DECL_THIS_STATIC (newdecl) |= DECL_THIS_STATIC (olddecl); DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (newdecl) |= DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (olddecl); if (DECL_OVERLOADED_OPERATOR_P (olddecl)) DECL_OVERLOADED_OPERATOR_CODE_RAW (newdecl) = DECL_OVERLOADED_OPERATOR_CODE_RAW (olddecl); new_defines_function = DECL_INITIAL (newdecl) != NULL_TREE; /* Optionally warn about more than one declaration for the same name, but don't warn about a function declaration followed by a definition. */ if (warn_redundant_decls && ! DECL_ARTIFICIAL (olddecl) && !(new_defines_function && DECL_INITIAL (olddecl) == NULL_TREE) /* Don't warn about extern decl followed by definition. */ && !(DECL_EXTERNAL (olddecl) && ! DECL_EXTERNAL (newdecl)) /* Don't warn about friends, let add_friend take care of it. */ && ! (newdecl_is_friend || DECL_FRIEND_P (olddecl)) /* Don't warn about declaration followed by specialization. */ && (! 
DECL_TEMPLATE_SPECIALIZATION (newdecl) || DECL_TEMPLATE_SPECIALIZATION (olddecl))) { auto_diagnostic_group d; if (warning_at (newdecl_loc, OPT_Wredundant_decls, "redundant redeclaration of %qD in same scope", newdecl)) inform (olddecl_loc, "previous declaration of %qD", olddecl); } if (!(DECL_TEMPLATE_INSTANTIATION (olddecl) && DECL_TEMPLATE_SPECIALIZATION (newdecl))) { if (DECL_DELETED_FN (newdecl)) { auto_diagnostic_group d; error_at (newdecl_loc, "deleted definition of %qD", newdecl); inform (olddecl_loc, "previous declaration of %qD", olddecl); } DECL_DELETED_FN (newdecl) |= DECL_DELETED_FN (olddecl); } } /* Deal with C++: must preserve virtual function table size. */ if (TREE_CODE (olddecl) == TYPE_DECL) { tree newtype = TREE_TYPE (newdecl); tree oldtype = TREE_TYPE (olddecl); if (newtype != error_mark_node && oldtype != error_mark_node && TYPE_LANG_SPECIFIC (newtype) && TYPE_LANG_SPECIFIC (oldtype)) CLASSTYPE_FRIEND_CLASSES (newtype) = CLASSTYPE_FRIEND_CLASSES (oldtype); DECL_ORIGINAL_TYPE (newdecl) = DECL_ORIGINAL_TYPE (olddecl); } /* Copy all the DECL_... slots specified in the new decl except for any that we copy here from the old type. */ if (merge_attr) DECL_ATTRIBUTES (newdecl) = (*targetm.merge_decl_attributes) (olddecl, newdecl); else DECL_ATTRIBUTES (olddecl) = DECL_ATTRIBUTES (newdecl); if (DECL_DECLARES_FUNCTION_P (olddecl)) { olddecl_friend = DECL_FRIEND_P (olddecl); olddecl_hidden_friend = DECL_HIDDEN_FRIEND_P (olddecl); hidden_friend = (DECL_ANTICIPATED (olddecl) && DECL_HIDDEN_FRIEND_P (olddecl) && newdecl_is_friend); if (!hidden_friend) { DECL_ANTICIPATED (olddecl) = 0; DECL_HIDDEN_FRIEND_P (olddecl) = 0; } } if (TREE_CODE (newdecl) == TEMPLATE_DECL) { tree old_result = DECL_TEMPLATE_RESULT (olddecl); tree new_result = DECL_TEMPLATE_RESULT (newdecl); TREE_TYPE (olddecl) = TREE_TYPE (old_result); /* The new decl should not already have gathered any specializations. 
*/ gcc_assert (!DECL_TEMPLATE_SPECIALIZATIONS (newdecl)); DECL_ATTRIBUTES (old_result) = (*targetm.merge_decl_attributes) (old_result, new_result); if (DECL_FUNCTION_TEMPLATE_P (newdecl)) { if (DECL_SOURCE_LOCATION (newdecl) != DECL_SOURCE_LOCATION (olddecl)) { /* Per C++11 8.3.6/4, default arguments cannot be added in later declarations of a function template. */ check_redeclaration_no_default_args (newdecl); /* C++17 11.3.6/4: "If a friend declaration specifies a default argument expression, that declaration... shall be the only declaration of the function or function template in the translation unit." */ check_no_redeclaration_friend_default_args (old_result, new_result, olddecl_hidden_friend); } check_default_args (newdecl); if (GNU_INLINE_P (old_result) != GNU_INLINE_P (new_result) && DECL_INITIAL (new_result)) { if (DECL_INITIAL (old_result)) DECL_UNINLINABLE (old_result) = 1; else DECL_UNINLINABLE (old_result) = DECL_UNINLINABLE (new_result); DECL_EXTERNAL (old_result) = DECL_EXTERNAL (new_result); DECL_NOT_REALLY_EXTERN (old_result) = DECL_NOT_REALLY_EXTERN (new_result); DECL_INTERFACE_KNOWN (old_result) = DECL_INTERFACE_KNOWN (new_result); DECL_DECLARED_INLINE_P (old_result) = DECL_DECLARED_INLINE_P (new_result); DECL_DISREGARD_INLINE_LIMITS (old_result) |= DECL_DISREGARD_INLINE_LIMITS (new_result); } else { DECL_DECLARED_INLINE_P (old_result) |= DECL_DECLARED_INLINE_P (new_result); DECL_DISREGARD_INLINE_LIMITS (old_result) |= DECL_DISREGARD_INLINE_LIMITS (new_result); check_redeclaration_exception_specification (newdecl, olddecl); merge_attribute_bits (new_result, old_result); } } /* If the new declaration is a definition, update the file and line information on the declaration, and also make the old declaration the same definition. 
*/ if (DECL_INITIAL (new_result) != NULL_TREE) { DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (old_result) = DECL_SOURCE_LOCATION (newdecl); DECL_INITIAL (old_result) = DECL_INITIAL (new_result); if (DECL_FUNCTION_TEMPLATE_P (newdecl)) { tree parm; DECL_ARGUMENTS (old_result) = DECL_ARGUMENTS (new_result); for (parm = DECL_ARGUMENTS (old_result); parm; parm = DECL_CHAIN (parm)) DECL_CONTEXT (parm) = old_result; } } return olddecl; } if (types_match) { if (TREE_CODE (newdecl) == FUNCTION_DECL) check_redeclaration_exception_specification (newdecl, olddecl); /* Automatically handles default parameters. */ tree oldtype = TREE_TYPE (olddecl); tree newtype; /* For typedefs use the old type, as the new type's DECL_NAME points at newdecl, which will be ggc_freed. */ if (TREE_CODE (newdecl) == TYPE_DECL) { /* But NEWTYPE might have an attribute, honor that. */ tree tem = TREE_TYPE (newdecl); newtype = oldtype; if (TYPE_USER_ALIGN (tem)) { if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype)) SET_TYPE_ALIGN (newtype, TYPE_ALIGN (tem)); TYPE_USER_ALIGN (newtype) = true; } /* And remove the new type from the variants list. */ if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl) { tree remove = TREE_TYPE (newdecl); if (TYPE_MAIN_VARIANT (remove) == remove) { gcc_assert (TYPE_NEXT_VARIANT (remove) == NULL_TREE); /* If remove is the main variant, no need to remove that from the list. One of the DECL_ORIGINAL_TYPE variants, e.g. created for aligned attribute, might still refer to the newdecl TYPE_DECL though, so remove that one in that case. 
*/ if (tree orig = DECL_ORIGINAL_TYPE (newdecl)) if (orig != remove) for (tree t = TYPE_MAIN_VARIANT (orig); t; t = TYPE_MAIN_VARIANT (t)) if (TYPE_NAME (TYPE_NEXT_VARIANT (t)) == newdecl) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (TYPE_NEXT_VARIANT (t)); break; } } else for (tree t = TYPE_MAIN_VARIANT (remove); ; t = TYPE_NEXT_VARIANT (t)) if (TYPE_NEXT_VARIANT (t) == remove) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove); break; } } } else if (merge_attr) newtype = merge_types (TREE_TYPE (newdecl), TREE_TYPE (olddecl)); else newtype = TREE_TYPE (newdecl); if (VAR_P (newdecl)) { DECL_THIS_EXTERN (newdecl) |= DECL_THIS_EXTERN (olddecl); /* For already initialized vars, TREE_READONLY could have been cleared in cp_finish_decl, because the var needs runtime initialization or destruction. Make sure not to set TREE_READONLY on it again. */ if (DECL_INITIALIZED_P (olddecl) && !DECL_EXTERNAL (olddecl) && !TREE_READONLY (olddecl)) TREE_READONLY (newdecl) = 0; DECL_INITIALIZED_P (newdecl) |= DECL_INITIALIZED_P (olddecl); DECL_NONTRIVIALLY_INITIALIZED_P (newdecl) |= DECL_NONTRIVIALLY_INITIALIZED_P (olddecl); if (DECL_DEPENDENT_INIT_P (olddecl)) SET_DECL_DEPENDENT_INIT_P (newdecl, true); DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (newdecl) |= DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (olddecl); DECL_DECLARED_CONSTEXPR_P (newdecl) |= DECL_DECLARED_CONSTEXPR_P (olddecl); /* Merge the threadprivate attribute from OLDDECL into NEWDECL. */ if (DECL_LANG_SPECIFIC (olddecl) && CP_DECL_THREADPRIVATE_P (olddecl)) { /* Allocate a LANG_SPECIFIC structure for NEWDECL, if needed. */ retrofit_lang_decl (newdecl); CP_DECL_THREADPRIVATE_P (newdecl) = 1; } } /* An explicit specialization of a function template or of a member function of a class template can be declared transaction_safe independently of whether the corresponding template entity is declared transaction_safe. 
*/ if (flag_tm && TREE_CODE (newdecl) == FUNCTION_DECL && DECL_TEMPLATE_INSTANTIATION (olddecl) && DECL_TEMPLATE_SPECIALIZATION (newdecl) && tx_safe_fn_type_p (newtype) && !tx_safe_fn_type_p (TREE_TYPE (newdecl))) newtype = tx_unsafe_fn_variant (newtype); TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = newtype; if (TREE_CODE (newdecl) == FUNCTION_DECL) check_default_args (newdecl); /* Lay the type out, unless already done. */ if (! same_type_p (newtype, oldtype) && TREE_TYPE (newdecl) != error_mark_node && !(processing_template_decl && uses_template_parms (newdecl))) layout_type (TREE_TYPE (newdecl)); if ((VAR_P (newdecl) || TREE_CODE (newdecl) == PARM_DECL || TREE_CODE (newdecl) == RESULT_DECL || TREE_CODE (newdecl) == FIELD_DECL || TREE_CODE (newdecl) == TYPE_DECL) && !(processing_template_decl && uses_template_parms (newdecl))) layout_decl (newdecl, 0); /* Merge deprecatedness. */ if (TREE_DEPRECATED (newdecl)) TREE_DEPRECATED (olddecl) = 1; /* Preserve function specific target and optimization options */ if (TREE_CODE (newdecl) == FUNCTION_DECL) { if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl) && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl)) DECL_FUNCTION_SPECIFIC_TARGET (newdecl) = DECL_FUNCTION_SPECIFIC_TARGET (olddecl); if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl) && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)) DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl) = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl); } else { /* Merge the const type qualifier. */ if (TREE_READONLY (newdecl)) TREE_READONLY (olddecl) = 1; /* Merge the volatile type qualifier. */ if (TREE_THIS_VOLATILE (newdecl)) TREE_THIS_VOLATILE (olddecl) = 1; } /* Merge the initialization information. 
*/ if (DECL_INITIAL (newdecl) == NULL_TREE && DECL_INITIAL (olddecl) != NULL_TREE) { DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl); DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl); } } if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl) |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl); DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl); if (DECL_IS_OPERATOR_NEW_P (olddecl)) DECL_SET_IS_OPERATOR_NEW (newdecl, true); DECL_LOOPING_CONST_OR_PURE_P (newdecl) |= DECL_LOOPING_CONST_OR_PURE_P (olddecl); DECL_IS_REPLACEABLE_OPERATOR (newdecl) |= DECL_IS_REPLACEABLE_OPERATOR (olddecl); if (merge_attr) merge_attribute_bits (newdecl, olddecl); else { /* Merge the noreturn bit. */ TREE_THIS_VOLATILE (olddecl) = TREE_THIS_VOLATILE (newdecl); TREE_READONLY (olddecl) = TREE_READONLY (newdecl); TREE_NOTHROW (olddecl) = TREE_NOTHROW (newdecl); DECL_IS_MALLOC (olddecl) = DECL_IS_MALLOC (newdecl); DECL_PURE_P (olddecl) = DECL_PURE_P (newdecl); } /* Keep the old RTL. */ COPY_DECL_RTL (olddecl, newdecl); } else if (VAR_P (newdecl) && (DECL_SIZE (olddecl) || !DECL_SIZE (newdecl))) { /* Keep the old RTL. We cannot keep the old RTL if the old declaration was for an incomplete object and the new declaration is not since many attributes of the RTL will change. */ COPY_DECL_RTL (olddecl, newdecl); } } /* If cannot merge, then use the new type and qualifiers, and don't preserve the old rtl. */ else { /* Clean out any memory we had of the old declaration. 
*/ tree oldstatic = value_member (olddecl, static_aggregates); if (oldstatic) TREE_VALUE (oldstatic) = error_mark_node; TREE_TYPE (olddecl) = TREE_TYPE (newdecl); TREE_READONLY (olddecl) = TREE_READONLY (newdecl); TREE_THIS_VOLATILE (olddecl) = TREE_THIS_VOLATILE (newdecl); TREE_NOTHROW (olddecl) = TREE_NOTHROW (newdecl); TREE_SIDE_EFFECTS (olddecl) = TREE_SIDE_EFFECTS (newdecl); } /* Merge the storage class information. */ merge_weak (newdecl, olddecl); DECL_DEFER_OUTPUT (newdecl) |= DECL_DEFER_OUTPUT (olddecl); TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl); TREE_STATIC (olddecl) = TREE_STATIC (newdecl) |= TREE_STATIC (olddecl); if (! DECL_EXTERNAL (olddecl)) DECL_EXTERNAL (newdecl) = 0; if (! DECL_COMDAT (olddecl)) DECL_COMDAT (newdecl) = 0; new_template_info = NULL_TREE; if (DECL_LANG_SPECIFIC (newdecl) && DECL_LANG_SPECIFIC (olddecl)) { bool new_redefines_gnu_inline = false; if (new_defines_function && ((DECL_INTERFACE_KNOWN (olddecl) && TREE_CODE (olddecl) == FUNCTION_DECL) || (TREE_CODE (olddecl) == TEMPLATE_DECL && (TREE_CODE (DECL_TEMPLATE_RESULT (olddecl)) == FUNCTION_DECL)))) { tree fn = olddecl; if (TREE_CODE (fn) == TEMPLATE_DECL) fn = DECL_TEMPLATE_RESULT (olddecl); new_redefines_gnu_inline = GNU_INLINE_P (fn) && DECL_INITIAL (fn); } if (!new_redefines_gnu_inline) { DECL_INTERFACE_KNOWN (newdecl) |= DECL_INTERFACE_KNOWN (olddecl); DECL_NOT_REALLY_EXTERN (newdecl) |= DECL_NOT_REALLY_EXTERN (olddecl); DECL_COMDAT (newdecl) |= DECL_COMDAT (olddecl); } DECL_TEMPLATE_INSTANTIATED (newdecl) |= DECL_TEMPLATE_INSTANTIATED (olddecl); DECL_ODR_USED (newdecl) |= DECL_ODR_USED (olddecl); /* If the OLDDECL is an instantiation and/or specialization, then the NEWDECL must be too. But, it may not yet be marked as such if the caller has created NEWDECL, but has not yet figured out that it is a redeclaration. 
*/ if (!DECL_USE_TEMPLATE (newdecl)) DECL_USE_TEMPLATE (newdecl) = DECL_USE_TEMPLATE (olddecl); /* Don't really know how much of the language-specific values we should copy from old to new. */ DECL_IN_AGGR_P (newdecl) = DECL_IN_AGGR_P (olddecl); DECL_INITIALIZED_IN_CLASS_P (newdecl) |= DECL_INITIALIZED_IN_CLASS_P (olddecl); if (LANG_DECL_HAS_MIN (newdecl)) { DECL_ACCESS (newdecl) = DECL_ACCESS (olddecl); if (DECL_TEMPLATE_INFO (newdecl)) { new_template_info = DECL_TEMPLATE_INFO (newdecl); if (DECL_TEMPLATE_INSTANTIATION (olddecl) && DECL_TEMPLATE_SPECIALIZATION (newdecl)) /* Remember the presence of explicit specialization args. */ TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (olddecl)) = TINFO_USED_TEMPLATE_ID (new_template_info); } DECL_TEMPLATE_INFO (newdecl) = DECL_TEMPLATE_INFO (olddecl); } if (DECL_DECLARES_FUNCTION_P (newdecl)) { /* Only functions have these fields. */ DECL_NONCONVERTING_P (newdecl) = DECL_NONCONVERTING_P (olddecl); DECL_BEFRIENDING_CLASSES (newdecl) = chainon (DECL_BEFRIENDING_CLASSES (newdecl), DECL_BEFRIENDING_CLASSES (olddecl)); /* DECL_THUNKS is only valid for virtual functions, otherwise it is a DECL_FRIEND_CONTEXT. */ if (DECL_VIRTUAL_P (newdecl)) SET_DECL_THUNKS (newdecl, DECL_THUNKS (olddecl)); } else if (VAR_P (newdecl)) { /* Only variables have this field. */ if (VAR_HAD_UNKNOWN_BOUND (olddecl)) SET_VAR_HAD_UNKNOWN_BOUND (newdecl); } } if (TREE_CODE (newdecl) == FUNCTION_DECL) { tree parm; /* Merge parameter attributes. */ tree oldarg, newarg; for (oldarg = DECL_ARGUMENTS(olddecl), newarg = DECL_ARGUMENTS(newdecl); oldarg && newarg; oldarg = DECL_CHAIN(oldarg), newarg = DECL_CHAIN(newarg)) { DECL_ATTRIBUTES (newarg) = (*targetm.merge_decl_attributes) (oldarg, newarg); DECL_ATTRIBUTES (oldarg) = DECL_ATTRIBUTES (newarg); } if (DECL_TEMPLATE_INSTANTIATION (olddecl) && !DECL_TEMPLATE_INSTANTIATION (newdecl)) { /* If newdecl is not a specialization, then it is not a template-related function at all. 
And that means that we should have exited above, returning 0. */ gcc_assert (DECL_TEMPLATE_SPECIALIZATION (newdecl)); if (DECL_ODR_USED (olddecl)) /* From [temp.expl.spec]: If a template, a member template or the member of a class template is explicitly specialized then that specialization shall be declared before the first use of that specialization that would cause an implicit instantiation to take place, in every translation unit in which such a use occurs. */ error ("explicit specialization of %qD after first use", olddecl); SET_DECL_TEMPLATE_SPECIALIZATION (olddecl); DECL_COMDAT (newdecl) = (TREE_PUBLIC (newdecl) && DECL_DECLARED_INLINE_P (newdecl)); /* Don't propagate visibility from the template to the specialization here. We'll do that in determine_visibility if appropriate. */ DECL_VISIBILITY_SPECIFIED (olddecl) = 0; /* [temp.expl.spec/14] We don't inline explicit specialization just because the primary template says so. */ gcc_assert (!merge_attr); DECL_DECLARED_INLINE_P (olddecl) = DECL_DECLARED_INLINE_P (newdecl); DECL_DISREGARD_INLINE_LIMITS (olddecl) = DECL_DISREGARD_INLINE_LIMITS (newdecl); DECL_UNINLINABLE (olddecl) = DECL_UNINLINABLE (newdecl); } else if (new_defines_function && DECL_INITIAL (olddecl)) { /* Never inline re-defined extern inline functions. FIXME: this could be better handled by keeping both function as separate declarations. 
*/ DECL_UNINLINABLE (newdecl) = 1; } else { if (DECL_PENDING_INLINE_P (olddecl)) { DECL_PENDING_INLINE_P (newdecl) = 1; DECL_PENDING_INLINE_INFO (newdecl) = DECL_PENDING_INLINE_INFO (olddecl); } else if (DECL_PENDING_INLINE_P (newdecl)) ; else if (DECL_SAVED_AUTO_RETURN_TYPE (newdecl) == NULL) DECL_SAVED_AUTO_RETURN_TYPE (newdecl) = DECL_SAVED_AUTO_RETURN_TYPE (olddecl); DECL_DECLARED_INLINE_P (newdecl) |= DECL_DECLARED_INLINE_P (olddecl); DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl) = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl)); DECL_DISREGARD_INLINE_LIMITS (newdecl) = DECL_DISREGARD_INLINE_LIMITS (olddecl) = (DECL_DISREGARD_INLINE_LIMITS (newdecl) || DECL_DISREGARD_INLINE_LIMITS (olddecl)); } /* Preserve abstractness on cloned [cd]tors. */ DECL_ABSTRACT_P (newdecl) = DECL_ABSTRACT_P (olddecl); /* Update newdecl's parms to point at olddecl. */ for (parm = DECL_ARGUMENTS (newdecl); parm; parm = DECL_CHAIN (parm)) DECL_CONTEXT (parm) = olddecl; if (! types_match) { SET_DECL_LANGUAGE (olddecl, DECL_LANGUAGE (newdecl)); COPY_DECL_ASSEMBLER_NAME (newdecl, olddecl); COPY_DECL_RTL (newdecl, olddecl); } if (! types_match || new_defines_function) { /* These need to be copied so that the names are available. Note that if the types do match, we'll preserve inline info and other bits, but if not, we won't. */ DECL_ARGUMENTS (olddecl) = DECL_ARGUMENTS (newdecl); DECL_RESULT (olddecl) = DECL_RESULT (newdecl); } /* If redeclaring a builtin function, it stays built in if newdecl is a gnu_inline definition, or if newdecl is just a declaration. */ if (fndecl_built_in_p (olddecl) && (new_defines_function ? GNU_INLINE_P (newdecl) : types_match)) { copy_decl_built_in_function (newdecl, olddecl); /* If we're keeping the built-in definition, keep the rtl, regardless of declaration matches. 
*/ COPY_DECL_RTL (olddecl, newdecl); if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL) { enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl); switch (fncode) { /* If a compatible prototype of these builtin functions is seen, assume the runtime implements it with the expected semantics. */ case BUILT_IN_STPCPY: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_implicit_p (fncode, true); break; default: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_declared_p (fncode, true); break; } copy_attributes_to_builtin (newdecl); } } if (new_defines_function) /* If defining a function declared with other language linkage, use the previously declared language linkage. */ SET_DECL_LANGUAGE (newdecl, DECL_LANGUAGE (olddecl)); else if (types_match) { DECL_RESULT (newdecl) = DECL_RESULT (olddecl); /* Don't clear out the arguments if we're just redeclaring a function. */ if (DECL_ARGUMENTS (olddecl)) DECL_ARGUMENTS (newdecl) = DECL_ARGUMENTS (olddecl); } } else if (TREE_CODE (newdecl) == NAMESPACE_DECL) NAMESPACE_LEVEL (newdecl) = NAMESPACE_LEVEL (olddecl); /* Now preserve various other info from the definition. */ TREE_ADDRESSABLE (newdecl) = TREE_ADDRESSABLE (olddecl); TREE_ASM_WRITTEN (newdecl) = TREE_ASM_WRITTEN (olddecl); DECL_COMMON (newdecl) = DECL_COMMON (olddecl); COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl); /* Warn about conflicting visibility specifications. */ if (DECL_VISIBILITY_SPECIFIED (olddecl) && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl)) { auto_diagnostic_group d; if (warning_at (newdecl_loc, OPT_Wattributes, "%qD: visibility attribute ignored because it " "conflicts with previous declaration", newdecl)) inform (olddecl_loc, "previous declaration of %qD", olddecl); } /* Choose the declaration which specified visibility. 
*/ if (DECL_VISIBILITY_SPECIFIED (olddecl)) { DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl); DECL_VISIBILITY_SPECIFIED (newdecl) = 1; } /* Init priority used to be merged from newdecl to olddecl by the memcpy, so keep this behavior. */ if (VAR_P (newdecl) && DECL_HAS_INIT_PRIORITY_P (newdecl)) { SET_DECL_INIT_PRIORITY (olddecl, DECL_INIT_PRIORITY (newdecl)); DECL_HAS_INIT_PRIORITY_P (olddecl) = 1; } /* Likewise for DECL_ALIGN, DECL_USER_ALIGN and DECL_PACKED. */ if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl)) { SET_DECL_ALIGN (newdecl, DECL_ALIGN (olddecl)); DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl); } DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl); if (DECL_WARN_IF_NOT_ALIGN (olddecl) > DECL_WARN_IF_NOT_ALIGN (newdecl)) SET_DECL_WARN_IF_NOT_ALIGN (newdecl, DECL_WARN_IF_NOT_ALIGN (olddecl)); if (TREE_CODE (newdecl) == FIELD_DECL) DECL_PACKED (olddecl) = DECL_PACKED (newdecl); /* The DECL_LANG_SPECIFIC information in OLDDECL will be replaced with that from NEWDECL below. */ if (DECL_LANG_SPECIFIC (olddecl)) { gcc_assert (DECL_LANG_SPECIFIC (olddecl) != DECL_LANG_SPECIFIC (newdecl)); ggc_free (DECL_LANG_SPECIFIC (olddecl)); } /* Merge the USED information. */ if (TREE_USED (olddecl)) TREE_USED (newdecl) = 1; else if (TREE_USED (newdecl)) TREE_USED (olddecl) = 1; if (VAR_P (newdecl)) { if (DECL_READ_P (olddecl)) DECL_READ_P (newdecl) = 1; else if (DECL_READ_P (newdecl)) DECL_READ_P (olddecl) = 1; } if (DECL_PRESERVE_P (olddecl)) DECL_PRESERVE_P (newdecl) = 1; else if (DECL_PRESERVE_P (newdecl)) DECL_PRESERVE_P (olddecl) = 1; /* Merge the DECL_FUNCTION_VERSIONED information. newdecl will be copied to olddecl and deleted. */ if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_FUNCTION_VERSIONED (olddecl)) { /* Set the flag for newdecl so that it gets copied to olddecl. */ DECL_FUNCTION_VERSIONED (newdecl) = 1; /* newdecl will be purged after copying to olddecl and is no longer a version. 
*/ cgraph_node::delete_function_version_by_decl (newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { int function_size; struct symtab_node *snode = symtab_node::get (olddecl); function_size = sizeof (struct tree_decl_common); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), function_size - sizeof (struct tree_common)); memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_function_decl) - sizeof (struct tree_decl_common)); /* Preserve symtab node mapping. */ olddecl->decl_with_vis.symtab_node = snode; if (new_template_info) /* If newdecl is a template instantiation, it is possible that the following sequence of events has occurred: o A friend function was declared in a class template. The class template was instantiated. o The instantiation of the friend declaration was recorded on the instantiation list, and is newdecl. o Later, however, instantiate_class_template called pushdecl on the newdecl to perform name injection. But, pushdecl in turn called duplicate_decls when it discovered that another declaration of a global function with the same name already existed. o Here, in duplicate_decls, we decided to clobber newdecl. If we're going to do that, we'd better make sure that olddecl, and not newdecl, is on the list of instantiations so that if we try to do the instantiation again we won't get the clobbered declaration. 
*/ reregister_specialization (newdecl, new_template_info, olddecl); } else { size_t size = tree_code_size (TREE_CODE (newdecl)); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), sizeof (struct tree_decl_common) - sizeof (struct tree_common)); switch (TREE_CODE (newdecl)) { case LABEL_DECL: case VAR_DECL: case RESULT_DECL: case PARM_DECL: case FIELD_DECL: case TYPE_DECL: case CONST_DECL: { struct symtab_node *snode = NULL; if (VAR_P (newdecl) && (TREE_STATIC (olddecl) || TREE_PUBLIC (olddecl) || DECL_EXTERNAL (olddecl))) snode = symtab_node::get (olddecl); memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), size - sizeof (struct tree_decl_common) + TREE_CODE_LENGTH (TREE_CODE (newdecl)) * sizeof (char *)); if (VAR_P (newdecl)) olddecl->decl_with_vis.symtab_node = snode; } break; default: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common) + TREE_CODE_LENGTH (TREE_CODE (newdecl)) * sizeof (char *)); break; } } if (VAR_OR_FUNCTION_DECL_P (newdecl)) { if (DECL_EXTERNAL (olddecl) || TREE_PUBLIC (olddecl) || TREE_STATIC (olddecl)) { /* Merge the section attribute. We want to issue an error if the sections conflict but that must be done later in decl_attributes since we are called before attributes are assigned. 
*/ if (DECL_SECTION_NAME (newdecl) != NULL) set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl)); if (DECL_ONE_ONLY (newdecl)) { struct symtab_node *oldsym, *newsym; if (TREE_CODE (olddecl) == FUNCTION_DECL) oldsym = cgraph_node::get_create (olddecl); else oldsym = varpool_node::get_create (olddecl); newsym = symtab_node::get (newdecl); oldsym->set_comdat_group (newsym->get_comdat_group ()); } } if (VAR_P (newdecl) && CP_DECL_THREAD_LOCAL_P (newdecl)) { CP_DECL_THREAD_LOCAL_P (olddecl) = true; if (!processing_template_decl) set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl)); } } DECL_UID (olddecl) = olddecl_uid; if (olddecl_friend) DECL_FRIEND_P (olddecl) = 1; if (hidden_friend) { DECL_ANTICIPATED (olddecl) = 1; DECL_HIDDEN_FRIEND_P (olddecl) = 1; } /* NEWDECL contains the merged attribute lists. Update OLDDECL to be the same. */ DECL_ATTRIBUTES (olddecl) = DECL_ATTRIBUTES (newdecl); /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl so that encode_section_info has a chance to look at the new decl flags and attributes. */ if (DECL_RTL_SET_P (olddecl) && (TREE_CODE (olddecl) == FUNCTION_DECL || (VAR_P (olddecl) && TREE_STATIC (olddecl)))) make_decl_rtl (olddecl); /* The NEWDECL will no longer be needed. Because every out-of-class declaration of a member results in a call to duplicate_decls, freeing these nodes represents in a significant savings. Before releasing the node, be sore to remove function from symbol table that might have been inserted there to record comdat group. Be sure to however do not free DECL_STRUCT_FUNCTION because this structure is shared in between newdecl and oldecl. 
*/ if (TREE_CODE (newdecl) == FUNCTION_DECL) DECL_STRUCT_FUNCTION (newdecl) = NULL; if (VAR_OR_FUNCTION_DECL_P (newdecl)) { struct symtab_node *snode = symtab_node::get (newdecl); if (snode) snode->remove (); } if (TREE_CODE (olddecl) == FUNCTION_DECL) { tree clone; FOR_EACH_CLONE (clone, olddecl) { DECL_ATTRIBUTES (clone) = DECL_ATTRIBUTES (olddecl); DECL_PRESERVE_P (clone) |= DECL_PRESERVE_P (olddecl); } } /* Remove the associated constraints for newdecl, if any, before reclaiming memory. */ if (flag_concepts) remove_constraints (newdecl); ggc_free (newdecl); return olddecl; } /* Return zero if the declaration NEWDECL is valid when the declaration OLDDECL (assumed to be for the same name) has already been seen. Otherwise return an error message format string with a %s where the identifier should go. */ static const char * redeclaration_error_message (tree newdecl, tree olddecl) { if (TREE_CODE (newdecl) == TYPE_DECL) { /* Because C++ can put things into name space for free, constructs like "typedef struct foo { ... } foo" would look like an erroneous redeclaration. */ if (same_type_p (TREE_TYPE (newdecl), TREE_TYPE (olddecl))) return NULL; else return G_("redefinition of %q#D"); } else if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If this is a pure function, its olddecl will actually be the original initialization to `0' (which we force to call abort()). Don't complain about redefinition in this case. */ if (DECL_LANG_SPECIFIC (olddecl) && DECL_PURE_VIRTUAL_P (olddecl) && DECL_INITIAL (olddecl) == NULL_TREE) return NULL; /* If both functions come from different namespaces, this is not a redeclaration - this is a conflict with a used function. */ if (DECL_NAMESPACE_SCOPE_P (olddecl) && DECL_CONTEXT (olddecl) != DECL_CONTEXT (newdecl) && ! decls_match (olddecl, newdecl)) return G_("%qD conflicts with used function"); /* We'll complain about linkage mismatches in warn_extern_redeclared_static. */ /* Defining the same name twice is no good. 
*/ if (decl_defined_p (olddecl) && decl_defined_p (newdecl)) { if (DECL_NAME (olddecl) == NULL_TREE) return G_("%q#D not declared in class"); else if (!GNU_INLINE_P (olddecl) || GNU_INLINE_P (newdecl)) return G_("redefinition of %q#D"); } if (DECL_DECLARED_INLINE_P (olddecl) && DECL_DECLARED_INLINE_P (newdecl)) { bool olda = GNU_INLINE_P (olddecl); bool newa = GNU_INLINE_P (newdecl); if (olda != newa) { if (newa) return G_("%q+D redeclared inline with " "%<gnu_inline%> attribute"); else return G_("%q+D redeclared inline without " "%<gnu_inline%> attribute"); } } /* [class.compare.default]: A definition of a comparison operator as defaulted that appears in a class shall be the first declaration of that function. */ special_function_kind sfk = special_function_p (olddecl); if (sfk == sfk_comparison && DECL_DEFAULTED_FN (newdecl)) return G_("comparison operator %q+D defaulted after " "its first declaration"); check_abi_tag_redeclaration (olddecl, lookup_attribute ("abi_tag", DECL_ATTRIBUTES (olddecl)), lookup_attribute ("abi_tag", DECL_ATTRIBUTES (newdecl))); return NULL; } else if (TREE_CODE (newdecl) == TEMPLATE_DECL) { tree nt, ot; if (TREE_CODE (DECL_TEMPLATE_RESULT (newdecl)) == CONCEPT_DECL) return G_("redefinition of %q#D"); if (TREE_CODE (DECL_TEMPLATE_RESULT (newdecl)) != FUNCTION_DECL) return redeclaration_error_message (DECL_TEMPLATE_RESULT (newdecl), DECL_TEMPLATE_RESULT (olddecl)); if (DECL_TEMPLATE_RESULT (newdecl) == DECL_TEMPLATE_RESULT (olddecl)) return NULL; nt = DECL_TEMPLATE_RESULT (newdecl); if (DECL_TEMPLATE_INFO (nt)) nt = DECL_TEMPLATE_RESULT (template_for_substitution (nt)); ot = DECL_TEMPLATE_RESULT (olddecl); if (DECL_TEMPLATE_INFO (ot)) ot = DECL_TEMPLATE_RESULT (template_for_substitution (ot)); if (DECL_INITIAL (nt) && DECL_INITIAL (ot) && (!GNU_INLINE_P (ot) || GNU_INLINE_P (nt))) return G_("redefinition of %q#D"); if (DECL_DECLARED_INLINE_P (ot) && DECL_DECLARED_INLINE_P (nt)) { bool olda = GNU_INLINE_P (ot); bool newa = GNU_INLINE_P 
(nt); if (olda != newa) { if (newa) return G_("%q+D redeclared inline with " "%<gnu_inline%> attribute"); else return G_("%q+D redeclared inline without " "%<gnu_inline%> attribute"); } } /* Core issue #226 (C++0x): If a friend function template declaration specifies a default template-argument, that declaration shall be a definition and shall be the only declaration of the function template in the translation unit. */ if ((cxx_dialect != cxx98) && TREE_CODE (ot) == FUNCTION_DECL && DECL_FRIEND_P (ot) && !check_default_tmpl_args (nt, DECL_TEMPLATE_PARMS (newdecl), /*is_primary=*/true, /*is_partial=*/false, /*is_friend_decl=*/2)) return G_("redeclaration of friend %q#D " "may not have default template arguments"); return NULL; } else if (VAR_P (newdecl) && CP_DECL_THREAD_LOCAL_P (newdecl) != CP_DECL_THREAD_LOCAL_P (olddecl) && (! DECL_LANG_SPECIFIC (olddecl) || ! CP_DECL_THREADPRIVATE_P (olddecl) || CP_DECL_THREAD_LOCAL_P (newdecl))) { /* Only variables can be thread-local, and all declarations must agree on this property. */ if (CP_DECL_THREAD_LOCAL_P (newdecl)) return G_("thread-local declaration of %q#D follows " "non-thread-local declaration"); else return G_("non-thread-local declaration of %q#D follows " "thread-local declaration"); } else if (toplevel_bindings_p () || DECL_NAMESPACE_SCOPE_P (newdecl)) { /* The objects have been declared at namespace scope. If either is a member of an anonymous union, then this is an invalid redeclaration. For example: int i; union { int i; }; is invalid. */ if ((VAR_P (newdecl) && DECL_ANON_UNION_VAR_P (newdecl)) || (VAR_P (olddecl) && DECL_ANON_UNION_VAR_P (olddecl))) return G_("redeclaration of %q#D"); /* If at least one declaration is a reference, there is no conflict. For example: int i = 3; extern int i; is valid. 
*/ if (DECL_EXTERNAL (newdecl) || DECL_EXTERNAL (olddecl)) return NULL; /* Static data member declared outside a class definition if the variable is defined within the class with constexpr specifier is declaration rather than definition (and deprecated). */ if (cxx_dialect >= cxx17 && VAR_P (olddecl) && DECL_CLASS_SCOPE_P (olddecl) && DECL_DECLARED_CONSTEXPR_P (olddecl) && !DECL_INITIAL (newdecl)) { DECL_EXTERNAL (newdecl) = 1; /* For now, only warn with explicit -Wdeprecated. */ if (global_options_set.x_warn_deprecated) { auto_diagnostic_group d; if (warning_at (DECL_SOURCE_LOCATION (newdecl), OPT_Wdeprecated, "redundant redeclaration of %<constexpr%> " "static data member %qD", newdecl)) inform (DECL_SOURCE_LOCATION (olddecl), "previous declaration of %qD", olddecl); } return NULL; } /* Reject two definitions. */ return G_("redefinition of %q#D"); } else { /* Objects declared with block scope: */ /* Reject two definitions, and reject a definition together with an external reference. */ if (!(DECL_EXTERNAL (newdecl) && DECL_EXTERNAL (olddecl))) return G_("redeclaration of %q#D"); return NULL; } } /* Hash and equality functions for the named_label table. */ hashval_t named_label_hash::hash (const value_type entry) { return IDENTIFIER_HASH_VALUE (entry->name); } bool named_label_hash::equal (const value_type entry, compare_type name) { return name == entry->name; } /* Look for a label named ID in the current function. If one cannot be found, create one. Return the named_label_entry, or NULL on failure. */ static named_label_entry * lookup_label_1 (tree id, bool making_local_p) { /* You can't use labels at global scope. 
*/ if (current_function_decl == NULL_TREE)
    {
      error ("label %qE referenced outside of any function", id);
      return NULL;
    }

  /* Lazily create the per-function label table.  */
  if (!named_labels)
    named_labels = hash_table<named_label_hash>::create_ggc (13);

  hashval_t hash = IDENTIFIER_HASH_VALUE (id);
  named_label_entry **slot
    = named_labels->find_slot_with_hash (id, hash, INSERT);
  named_label_entry *old = *slot;

  if (old && old->label_decl)
    {
      /* An ordinary reference to an already-known label just reuses it.  */
      if (!making_local_p)
	return old;

      /* A GNU local label may shadow an outer label, but not one declared
	 in the same binding contour.  */
      if (old->binding_level == current_binding_level)
	{
	  error ("local label %qE conflicts with existing label", id);
	  inform (DECL_SOURCE_LOCATION (old->label_decl), "previous label");
	  return NULL;
	}
    }

  /* We are making a new decl, create or reuse the named_label_entry */
  named_label_entry *ent = NULL;
  if (old && !old->label_decl)
    ent = old;
  else
    {
      ent = ggc_cleared_alloc<named_label_entry> ();
      ent->name = id;
      ent->outer = old;
      *slot = ent;
    }

  /* Now create the LABEL_DECL. */
  tree decl = build_decl (input_location, LABEL_DECL, id, void_type_node);

  DECL_CONTEXT (decl) = current_function_decl;
  SET_DECL_MODE (decl, VOIDmode);
  if (making_local_p)
    {
      /* Chain local labels onto the current binding level's names so they
	 go out of scope with that contour.  */
      C_DECLARED_LABEL_FLAG (decl) = true;
      DECL_CHAIN (decl) = current_binding_level->names;
      current_binding_level->names = decl;
    }

  ent->label_decl = decl;

  return ent;
}

/* Wrapper for lookup_label_1.  Look up (or create) the ordinary label named
   ID in the current function; returns its LABEL_DECL, or NULL_TREE on
   failure.  */

tree
lookup_label (tree id)
{
  bool subtime = timevar_cond_start (TV_NAME_LOOKUP);
  named_label_entry *ent = lookup_label_1 (id, false);
  timevar_cond_stop (TV_NAME_LOOKUP, subtime);
  return ent ? ent->label_decl : NULL_TREE;
}

/* Wrapper for lookup_label_1 that declares a GNU local label named ID in the
   current binding contour; returns its LABEL_DECL, or NULL_TREE on
   failure.  */

tree
declare_local_label (tree id)
{
  bool subtime = timevar_cond_start (TV_NAME_LOOKUP);
  named_label_entry *ent = lookup_label_1 (id, true);
  timevar_cond_stop (TV_NAME_LOOKUP, subtime);
  return ent ? ent->label_decl : NULL_TREE;
}

/* Returns nonzero if it is ill-formed to jump past the declaration of DECL.
   Returns 2 if it's also a real problem.
*/ static int
decl_jump_unsafe (tree decl)
{
  /* [stmt.dcl]/3: A program that jumps from a point where a local variable
     with automatic storage duration is not in scope to a point where it is
     in scope is ill-formed unless the variable has scalar type, class type
     with a trivial default constructor and a trivial destructor, a
     cv-qualified version of one of these types, or an array of one of the
     preceding types and is declared without an initializer (8.5).  */
  tree type = TREE_TYPE (decl);

  /* Statics, non-variables, and erroneous declarations are always safe to
     jump past.  */
  if (!VAR_P (decl) || TREE_STATIC (decl) || type == error_mark_node)
    return 0;

  /* Skipping a non-trivial initialization or a variably-modified type is a
     hard error (2); skipping only a non-trivial destructor is reported more
     leniently (1).  */
  if (DECL_NONTRIVIALLY_INITIALIZED_P (decl)
      || variably_modified_type_p (type, NULL_TREE))
    return 2;

  if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
    return 1;

  return 0;
}

/* A subroutine of check_previous_goto_1 and check_goto to identify a branch
   to the user.  DECL is the LABEL_DECL jumped to, or NULL for a case label.
   Returns true if a diagnostic was actually emitted (so follow-up notes
   should be given too).  */

static bool
identify_goto (tree decl, location_t loc, const location_t *locus,
	       diagnostic_t diag_kind)
{
  bool complained
    = emit_diagnostic (diag_kind, loc, 0,
		       decl ? N_("jump to label %qD")
		       : N_("jump to case label"), decl);
  if (complained && locus)
    inform (*locus, " from here");
  return complained;
}

/* Check that a single previously seen jump to a newly defined label is OK.
   DECL is the LABEL_DECL or 0; LEVEL is the binding_level for the jump
   context; NAMES are the names in scope in LEVEL at the jump context; LOCUS
   is the source position of the jump or 0.  Returns true if all is well.  */

static bool
check_previous_goto_1 (tree decl, cp_binding_level* level, tree names,
		       bool exited_omp, const location_t *locus)
{
  cp_binding_level *b;
  bool complained = false;
  int identified = 0;
  bool saw_eh = false, saw_omp = false, saw_tm = false, saw_cxif = false;

  /* Leaving an OpenMP structured block via goto is always an error.  */
  if (exited_omp)
    {
      complained = identify_goto (decl, input_location, locus, DK_ERROR);
      if (complained)
	inform (input_location, " exits OpenMP structured block");
      saw_omp = true;
      identified = 2;
    }

  /* Walk outward from the label's scope to the jump's scope, diagnosing
     every declaration the jump would bypass and every special region it
     would enter.  */
  for (b = current_binding_level; b ; b = b->level_chain)
    {
      tree new_decls, old_decls = (b == level ?
names : NULL_TREE); for (new_decls = b->names; new_decls != old_decls; new_decls = (DECL_P (new_decls) ? DECL_CHAIN (new_decls) : TREE_CHAIN (new_decls))) { int problem = decl_jump_unsafe (new_decls); if (! problem) continue; if (!identified) { complained = identify_goto (decl, input_location, locus, problem > 1 ? DK_ERROR : DK_PERMERROR); identified = 1; } if (complained) { if (problem > 1) inform (DECL_SOURCE_LOCATION (new_decls), " crosses initialization of %q#D", new_decls); else inform (DECL_SOURCE_LOCATION (new_decls), " enters scope of %q#D, which has " "non-trivial destructor", new_decls); } } if (b == level) break; const char *inf = NULL; location_t loc = input_location; switch (b->kind) { case sk_try: if (!saw_eh) inf = G_(" enters %<try%> block"); saw_eh = true; break; case sk_catch: if (!saw_eh) inf = G_(" enters %<catch%> block"); saw_eh = true; break; case sk_omp: if (!saw_omp) inf = G_(" enters OpenMP structured block"); saw_omp = true; break; case sk_transaction: if (!saw_tm) inf = G_(" enters synchronized or atomic statement"); saw_tm = true; break; case sk_block: if (!saw_cxif && level_for_constexpr_if (b->level_chain)) { inf = G_(" enters %<constexpr if%> statement"); loc = EXPR_LOCATION (b->level_chain->this_entity); saw_cxif = true; } break; default: break; } if (inf) { if (identified < 2) complained = identify_goto (decl, input_location, locus, DK_ERROR); identified = 2; if (complained) inform (loc, inf); } } return !identified; } static void check_previous_goto (tree decl, struct named_label_use_entry *use) { check_previous_goto_1 (decl, use->binding_level, use->names_in_scope, use->in_omp_scope, &use->o_goto_locus); } static bool check_switch_goto (cp_binding_level* level) { return check_previous_goto_1 (NULL_TREE, level, level->names, false, NULL); } /* Check that a new jump to a label DECL is OK. Called by finish_goto_stmt. */ void check_goto (tree decl) { /* We can't know where a computed goto is jumping. So we assume that it's OK. 
*/ if (TREE_CODE (decl) != LABEL_DECL)
    return;

  /* We didn't record any information about this label when we created it,
     and there's not much point since it's trivial to analyze as a return. */
  if (decl == cdtor_label)
    return;

  hashval_t hash = IDENTIFIER_HASH_VALUE (DECL_NAME (decl));
  named_label_entry **slot
    = named_labels->find_slot_with_hash (DECL_NAME (decl), hash, NO_INSERT);
  named_label_entry *ent = *slot;

  /* If the label hasn't been defined yet, defer checking. */
  if (! DECL_INITIAL (decl))
    {
      /* Don't bother creating another use if the last goto had the same
	 data, and will therefore create the same set of errors. */
      if (ent->uses
	  && ent->uses->names_in_scope == current_binding_level->names)
	return;

      /* Record this use; check_previous_goto will verify it when the label
	 is eventually defined.  */
      named_label_use_entry *new_use = ggc_alloc<named_label_use_entry> ();
      new_use->binding_level = current_binding_level;
      new_use->names_in_scope = current_binding_level->names;
      new_use->o_goto_locus = input_location;
      new_use->in_omp_scope = false;

      new_use->next = ent->uses;
      ent->uses = new_use;
      return;
    }

  bool saw_catch = false, complained = false;
  int identified = 0;
  tree bad;
  unsigned ix;

  /* The label is already defined: diagnose entering any special region or
     skipping any problematic declaration recorded for it.  */
  if (ent->in_try_scope || ent->in_catch_scope || ent->in_transaction_scope
      || ent->in_constexpr_if
      || ent->in_omp_scope || !vec_safe_is_empty (ent->bad_decls))
    {
      /* Entering EH, constexpr-if, transactional or OpenMP regions is a
	 hard error; merely skipping declarations is a permerror.  */
      diagnostic_t diag_kind = DK_PERMERROR;
      if (ent->in_try_scope || ent->in_catch_scope || ent->in_constexpr_if
	  || ent->in_transaction_scope || ent->in_omp_scope)
	diag_kind = DK_ERROR;
      complained = identify_goto (decl, DECL_SOURCE_LOCATION (decl),
				  &input_location, diag_kind);
      identified = 1 + (diag_kind == DK_ERROR);
    }

  FOR_EACH_VEC_SAFE_ELT (ent->bad_decls, ix, bad)
    {
      int u = decl_jump_unsafe (bad);

      if (u > 1 && DECL_ARTIFICIAL (bad))
	{
	  /* Can't skip init of __exception_info.
*/ if (identified == 1)
	    {
	      complained = identify_goto (decl, DECL_SOURCE_LOCATION (decl),
					  &input_location, DK_ERROR);
	      identified = 2;
	    }
	  if (complained)
	    inform (DECL_SOURCE_LOCATION (bad), " enters %<catch%> block");
	  saw_catch = true;
	}
      else if (complained)
	{
	  if (u > 1)
	    inform (DECL_SOURCE_LOCATION (bad),
		    " skips initialization of %q#D", bad);
	  else
	    inform (DECL_SOURCE_LOCATION (bad),
		    " enters scope of %q#D which has "
		    "non-trivial destructor", bad);
	}
    }

  if (complained)
    {
      if (ent->in_try_scope)
	inform (input_location, " enters %<try%> block");
      else if (ent->in_catch_scope && !saw_catch)
	inform (input_location, " enters %<catch%> block");
      else if (ent->in_transaction_scope)
	inform (input_location, " enters synchronized or atomic statement");
      else if (ent->in_constexpr_if)
	inform (input_location, " enters %<constexpr if%> statement");
    }

  if (ent->in_omp_scope)
    {
      if (complained)
	inform (input_location, " enters OpenMP structured block");
    }
  else if (flag_openmp)
    /* The label itself is not inside an OpenMP region, but the goto might
       be: walk outward from the goto's scope toward the label's binding
       level looking for an intervening sk_omp scope.  */
    for (cp_binding_level *b = current_binding_level; b ; b = b->level_chain)
      {
	if (b == ent->binding_level)
	  break;
	if (b->kind == sk_omp)
	  {
	    if (identified < 2)
	      {
		complained = identify_goto (decl,
					    DECL_SOURCE_LOCATION (decl),
					    &input_location, DK_ERROR);
		identified = 2;
	      }
	    if (complained)
	      inform (input_location, " exits OpenMP structured block");
	    break;
	  }
      }
}

/* Check that a return is ok wrt OpenMP structured blocks.  Called by
   finish_return_stmt.  Returns true if all is well.  */

bool
check_omp_return (void)
{
  /* Scan outward only as far as the function-parameter scope; a return
     inside any enclosing sk_omp scope is invalid.  */
  for (cp_binding_level *b = current_binding_level; b ; b = b->level_chain)
    if (b->kind == sk_omp)
      {
	error ("invalid exit from OpenMP structured block");
	return false;
      }
    else if (b->kind == sk_function_parms)
      break;
  return true;
}

/* Define a label, specifying the location in the source file.  Return the
   LABEL_DECL node for the label.  */

static tree
define_label_1 (location_t location, tree name)
{
  /* After labels, make any new cleanups in the function go into their own
     new (temporary) binding contour.
*/ for (cp_binding_level *p = current_binding_level;
       p->kind != sk_function_parms;
       p = p->level_chain)
    p->more_cleanups_ok = 0;

  named_label_entry *ent = lookup_label_1 (name, false);
  tree decl = ent->label_decl;

  if (DECL_INITIAL (decl) != NULL_TREE)
    {
      error ("duplicate label %qD", decl);
      return error_mark_node;
    }
  else
    {
      /* Mark label as having been defined. */
      DECL_INITIAL (decl) = error_mark_node;
      /* Say where in the source. */
      DECL_SOURCE_LOCATION (decl) = location;

      ent->binding_level = current_binding_level;
      ent->names_in_scope = current_binding_level->names;

      /* Now that the label is defined, verify every jump to it that was
	 seen before the definition, then drop the recorded uses.  */
      for (named_label_use_entry *use = ent->uses; use; use = use->next)
	check_previous_goto (decl, use);
      ent->uses = NULL;
    }

  return decl;
}

/* Wrapper for define_label_1, accounting its time to TV_NAME_LOOKUP.  */

tree
define_label (location_t location, tree name)
{
  bool running = timevar_cond_start (TV_NAME_LOOKUP);
  tree ret = define_label_1 (location, name);
  timevar_cond_stop (TV_NAME_LOOKUP, running);
  return ret;
}

/* Bookkeeping for one switch statement being parsed.  */

struct cp_switch
{
  /* Binding level at the point of the switch; jumps to case labels are
     checked against it.  */
  cp_binding_level *level;
  /* Next entry outward on the switch stack.  */
  struct cp_switch *next;
  /* The SWITCH_STMT being built. */
  tree switch_stmt;
  /* A splay-tree mapping the low element of a case range to the high
     element, or NULL_TREE if there is no high element.  Used to determine
     whether or not a new case label duplicates an old case label.  We need
     a tree, rather than simply a hash table, because of the GNU case range
     extension.  */
  splay_tree cases;
  /* Remember whether a default: case label has been seen. */
  bool has_default_p;
  /* Remember whether a BREAK_STMT has been seen in this SWITCH_STMT. */
  bool break_stmt_seen_p;
  /* Set if inside of {FOR,DO,WHILE}_BODY nested inside of a switch, where
     BREAK_STMT doesn't belong to the SWITCH_STMT. */
  bool in_loop_body_p;
};

/* A stack of the currently active switch statements.  The innermost switch
   statement is on the top of the stack.  There is no need to mark the stack
   for garbage collection because it is only active during the processing of
   the body of a function, and we never collect at that point.
*/ static struct cp_switch *switch_stack;

/* Called right after a switch-statement condition is parsed.  SWITCH_STMT
   is the switch statement being parsed.  Pushes a fresh cp_switch entry for
   it onto switch_stack.  */

void
push_switch (tree switch_stmt)
{
  struct cp_switch *p = XNEW (struct cp_switch);
  p->level = current_binding_level;
  p->next = switch_stack;
  p->switch_stmt = switch_stmt;
  p->cases = splay_tree_new (case_compare, NULL, NULL);
  p->has_default_p = false;
  p->break_stmt_seen_p = false;
  p->in_loop_body_p = false;
  switch_stack = p;
}

/* Called when the parse of a switch statement is complete: emit any pending
   switch-related warnings, record fall-through facts on the SWITCH_STMT,
   and pop/free the innermost switch_stack entry.  */

void
pop_switch (void)
{
  struct cp_switch *cs = switch_stack;

  /* Emit warnings as needed. */
  location_t switch_location = cp_expr_loc_or_input_loc (cs->switch_stmt);
  tree cond = SWITCH_STMT_COND (cs->switch_stmt);
  const bool bool_cond_p
    = (SWITCH_STMT_TYPE (cs->switch_stmt)
       && TREE_CODE (SWITCH_STMT_TYPE (cs->switch_stmt)) == BOOLEAN_TYPE);

  /* In a template, case expressions may not be converted yet, so the
     warning machinery can't run.  */
  if (!processing_template_decl)
    c_do_switch_warnings (cs->cases, switch_location,
			  SWITCH_STMT_TYPE (cs->switch_stmt), cond,
			  bool_cond_p);

  /* For the benefit of block_may_fallthru remember if the switch body case
     labels cover all possible values and if there are break; stmts. */
  if (cs->has_default_p
      || (!processing_template_decl
	  && c_switch_covers_all_cases_p (cs->cases,
					  SWITCH_STMT_TYPE (cs->switch_stmt))))
    SWITCH_STMT_ALL_CASES_P (cs->switch_stmt) = 1;
  if (!cs->break_stmt_seen_p)
    SWITCH_STMT_NO_BREAK_P (cs->switch_stmt) = 1;
  /* Now that we're done with the switch warnings, set the switch type to
     the type of the condition if the index type was of scoped enum type.
     (Such types don't participate in the integer promotions.)  We do this
     because of bit-fields whose declared type is a scoped enum type:
     gimplification will use the lowered index type, but convert the case
     values to SWITCH_STMT_TYPE, which would have been the declared type and
     verify_gimple_switch doesn't accept that.
*/ if (is_bitfield_expr_with_lowered_type (cond))
    SWITCH_STMT_TYPE (cs->switch_stmt) = TREE_TYPE (cond);

  /* Every loop body inside the switch must have been exited by now.  */
  gcc_assert (!cs->in_loop_body_p);
  splay_tree_delete (cs->cases);
  switch_stack = switch_stack->next;
  free (cs);
}

/* Note that a BREAK_STMT is about to be added.  If it is inside of a
   SWITCH_STMT and not inside of a loop body inside of it, note in
   switch_stack we've seen a BREAK_STMT.  */

void
note_break_stmt (void)
{
  if (switch_stack && !switch_stack->in_loop_body_p)
    switch_stack->break_stmt_seen_p = true;
}

/* Note the start of processing of an iteration statement's body.  The
   note_break_stmt function will do nothing while processing it.  Return a
   flag that should be passed to note_iteration_stmt_body_end.  */

bool
note_iteration_stmt_body_start (void)
{
  if (!switch_stack)
    return false;
  /* Save the previous flag so nested loops restore correctly.  */
  bool ret = switch_stack->in_loop_body_p;
  switch_stack->in_loop_body_p = true;
  return ret;
}

/* Note the end of processing of an iteration statement's body.  PREV is the
   value returned by the matching note_iteration_stmt_body_start.  */

void
note_iteration_stmt_body_end (bool prev)
{
  if (switch_stack)
    switch_stack->in_loop_body_p = prev;
}

/* Convert a case constant VALUE in a switch to the type TYPE of the switch
   condition.  Note that if TYPE and VALUE are already integral we don't
   really do the conversion because the language-independent
   warning/optimization code will work better that way.  */

static tree
case_conversion (tree type, tree value)
{
  if (value == NULL_TREE)
    return value;

  value = mark_rvalue_use (value);

  if (INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (type))
    type = type_promotes_to (type);

  tree ovalue = value;
  /* The constant-expression VALUE shall be a converted constant expression
     of the adjusted type of the switch condition, which doesn't allow
     narrowing conversions.  */
  value = build_converted_constant_expr (type, value, tf_warning_or_error);

  if (cxx_dialect >= cxx11
      && (SCOPED_ENUM_P (type)
	  || !INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (ovalue))))
    /* Use the converted value. */;
  else
    /* The already integral case.
*/ value = ovalue;

  return cxx_constant_value (value);
}

/* Note that we've seen a definition of a case label, and complain if this
   is a bad place for one.  LOW_VALUE/HIGH_VALUE delimit a GNU case range;
   both NULL means a default: label.  Returns the added statement, or
   error_mark_node on failure.  */

tree
finish_case_label (location_t loc, tree low_value, tree high_value)
{
  tree cond, r;
  cp_binding_level *p;
  tree type;

  if (low_value == NULL_TREE && high_value == NULL_TREE)
    switch_stack->has_default_p = true;

  if (processing_template_decl)
    {
      tree label;

      /* For templates, just add the case label; we'll do semantic analysis
	 at instantiation-time.  */
      label = build_decl (loc, LABEL_DECL, NULL_TREE, void_type_node);
      return add_stmt (build_case_label (low_value, high_value, label));
    }

  /* Find the condition on which this switch statement depends. */
  cond = SWITCH_STMT_COND (switch_stack->switch_stmt);
  if (cond && TREE_CODE (cond) == TREE_LIST)
    cond = TREE_VALUE (cond);

  /* Jumping to this case label must not skip problematic declarations.  */
  if (!check_switch_goto (switch_stack->level))
    return error_mark_node;

  type = SWITCH_STMT_TYPE (switch_stack->switch_stmt);
  if (type == error_mark_node)
    return error_mark_node;

  low_value = case_conversion (type, low_value);
  high_value = case_conversion (type, high_value);

  r = c_add_case_label (loc, switch_stack->cases, cond, low_value,
			high_value);

  /* After labels, make any new cleanups in the function go into their own
     new (temporary) binding contour. */
  for (p = current_binding_level;
       p->kind != sk_function_parms;
       p = p->level_chain)
    p->more_cleanups_ok = 0;

  return r;
}

/* Key describing a TYPENAME_TYPE, used to unify equivalent nodes.  */

struct typename_info
{
  tree scope;
  tree name;
  tree template_id;
  bool enum_p;
  bool class_p;
};

struct typename_hasher : ggc_ptr_hash<tree_node>
{
  typedef typename_info *compare_type;

  /* Hash a TYPENAME_TYPE. */

  static hashval_t
  hash (tree t)
  {
    hashval_t hash;

    hash = (htab_hash_pointer (TYPE_CONTEXT (t))
	    ^ htab_hash_pointer (TYPE_IDENTIFIER (t)));

    return hash;
  }

  /* Compare two TYPENAME_TYPEs.
*/ static bool equal (tree t1, const typename_info *t2) { return (TYPE_IDENTIFIER (t1) == t2->name && TYPE_CONTEXT (t1) == t2->scope && TYPENAME_TYPE_FULLNAME (t1) == t2->template_id && TYPENAME_IS_ENUM_P (t1) == t2->enum_p && TYPENAME_IS_CLASS_P (t1) == t2->class_p); } }; /* Build a TYPENAME_TYPE. If the type is `typename T::t', CONTEXT is the type of `T', NAME is the IDENTIFIER_NODE for `t'. Returns the new TYPENAME_TYPE. */ static GTY (()) hash_table<typename_hasher> *typename_htab; tree build_typename_type (tree context, tree name, tree fullname, enum tag_types tag_type) { tree t; tree d; typename_info ti; tree *e; hashval_t hash; if (typename_htab == NULL) typename_htab = hash_table<typename_hasher>::create_ggc (61); ti.scope = FROB_CONTEXT (context); ti.name = name; ti.template_id = fullname; ti.enum_p = tag_type == enum_type; ti.class_p = (tag_type == class_type || tag_type == record_type || tag_type == union_type); hash = (htab_hash_pointer (ti.scope) ^ htab_hash_pointer (ti.name)); /* See if we already have this type. */ e = typename_htab->find_slot_with_hash (&ti, hash, INSERT); if (*e) t = *e; else { /* Build the TYPENAME_TYPE. */ t = cxx_make_type (TYPENAME_TYPE); TYPE_CONTEXT (t) = ti.scope; TYPENAME_TYPE_FULLNAME (t) = ti.template_id; TYPENAME_IS_ENUM_P (t) = ti.enum_p; TYPENAME_IS_CLASS_P (t) = ti.class_p; /* Build the corresponding TYPE_DECL. */ d = build_decl (input_location, TYPE_DECL, name, t); TYPE_NAME (TREE_TYPE (d)) = d; TYPE_STUB_DECL (TREE_TYPE (d)) = d; DECL_CONTEXT (d) = FROB_CONTEXT (context); DECL_ARTIFICIAL (d) = 1; /* Store it in the hash table. */ *e = t; /* TYPENAME_TYPEs must always be compared structurally, because they may or may not resolve down to another type depending on the currently open classes. */ SET_TYPE_STRUCTURAL_EQUALITY (t); } return t; } /* Resolve `typename CONTEXT::NAME'. TAG_TYPE indicates the tag provided to name the type. 
   Returns an appropriate type, unless an error occurs, in which case
   error_mark_node is returned.  If we locate a non-artificial TYPE_DECL
   and TF_KEEP_TYPE_DECL is set, we return that, rather than the _TYPE
   it corresponds to, in other cases we look through the type decl.  If
   TF_ERROR is set, complain about errors, otherwise be quiet.  */

tree
make_typename_type (tree context, tree name, enum tag_types tag_type,
		    tsubst_flags_t complain)
{
  tree fullname;
  tree t;
  bool want_template;

  /* Bail out early on any erroneous operand; downstream code assumes
     both CONTEXT and NAME are meaningful nodes.  */
  if (name == error_mark_node
      || context == NULL_TREE
      || context == error_mark_node)
    return error_mark_node;

  if (TYPE_P (name))
    {
      if (!(TYPE_LANG_SPECIFIC (name)
	    && (CLASSTYPE_IS_TEMPLATE (name)
		|| CLASSTYPE_USE_TEMPLATE (name))))
	name = TYPE_IDENTIFIER (name);
      else
	/* Create a TEMPLATE_ID_EXPR for the type.  */
	name = build_nt (TEMPLATE_ID_EXPR,
			 CLASSTYPE_TI_TEMPLATE (name),
			 CLASSTYPE_TI_ARGS (name));
    }
  else if (TREE_CODE (name) == TYPE_DECL)
    name = DECL_NAME (name);

  /* FULLNAME keeps the template arguments (if any); NAME is reduced
     to a bare IDENTIFIER_NODE below.  */
  fullname = name;

  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    {
      name = TREE_OPERAND (name, 0);
      if (DECL_TYPE_TEMPLATE_P (name))
	name = TREE_OPERAND (fullname, 0) = DECL_NAME (name);
      if (TREE_CODE (name) != IDENTIFIER_NODE)
	{
	  if (complain & tf_error)
	    error ("%qD is not a type", name);
	  return error_mark_node;
	}
    }
  if (TREE_CODE (name) == TEMPLATE_DECL)
    {
      if (complain & tf_error)
	error ("%qD used without template arguments", name);
      return error_mark_node;
    }
  gcc_assert (identifier_p (name));
  gcc_assert (TYPE_P (context));

  if (TREE_CODE (context) == TYPE_PACK_EXPANSION)
    /* This can happen for C++17 variadic using (c++/88986).  */;
  else if (!MAYBE_CLASS_TYPE_P (context))
    {
      if (complain & tf_error)
	error ("%q#T is not a class", context);
      return error_mark_node;
    }

  /* When the CONTEXT is a dependent type, NAME could refer to a
     dependent base class of CONTEXT.  But look inside it anyway
     if CONTEXT is a currently open scope, in case it refers to a
     member of the current instantiation or a non-dependent base;
     lookup will stop when we hit a dependent base.  */
  if (!dependent_scope_p (context))
    /* We should only set WANT_TYPE when we're a nested typename type.
       Then we can give better diagnostics if we find a non-type.  */
    t = lookup_field (context, name, 2, /*want_type=*/true);
  else
    t = NULL_TREE;

  /* If lookup found nothing (or was ambiguous) and the context is
     dependent, defer: build a TYPENAME_TYPE to be resolved at
     instantiation time.  */
  if ((!t || TREE_CODE (t) == TREE_LIST) && dependent_type_p (context))
    return build_typename_type (context, name, fullname, tag_type);

  want_template = TREE_CODE (fullname) == TEMPLATE_ID_EXPR;

  if (!t)
    {
      if (complain & tf_error)
	{
	  if (!COMPLETE_TYPE_P (context))
	    cxx_incomplete_type_error (NULL_TREE, context);
	  else
	    error (want_template ? G_("no class template named %q#T in %q#T")
		   : G_("no type named %q#T in %q#T"), name, context);
	}
      return error_mark_node;
    }

  /* Pull out the template from an injected-class-name (or multiple).  */
  if (want_template)
    t = maybe_get_template_decl_from_type_decl (t);

  /* A TREE_LIST result here means the (non-dependent) lookup was
     ambiguous.  */
  if (TREE_CODE (t) == TREE_LIST)
    {
      if (complain & tf_error)
	{
	  error ("lookup of %qT in %qT is ambiguous", name, context);
	  print_candidates (t);
	}
      return error_mark_node;
    }

  if (want_template && !DECL_TYPE_TEMPLATE_P (t))
    {
      if (complain & tf_error)
	error ("%<typename %T::%D%> names %q#T, which is not a class template",
	       context, name, t);
      return error_mark_node;
    }
  if (!want_template && TREE_CODE (t) != TYPE_DECL)
    {
      if (complain & tf_error)
	error ("%<typename %T::%D%> names %q#T, which is not a type",
	       context, name, t);
      return error_mark_node;
    }

  if (!perform_or_defer_access_check (TYPE_BINFO (context), t, t, complain))
    return error_mark_node;

  /* If we are currently parsing a template and if T is a typedef
     accessed through CONTEXT then we need to remember and check access
     of T at template instantiation time.  */
  add_typedef_to_current_template_for_access_check (t, context,
						    input_location);

  if (want_template)
    return lookup_template_class (t, TREE_OPERAND (fullname, 1),
				  NULL_TREE, context,
				  /*entering_scope=*/0,
				  complain | tf_user);

  /* Look through the TYPE_DECL unless the caller asked to keep it
     (and it is a user-written, non-artificial one).  */
  if (DECL_ARTIFICIAL (t) || !(complain & tf_keep_type_decl))
    t = TREE_TYPE (t);

  maybe_record_typedef_use (t);

  return t;
}

/* Resolve `CONTEXT::template NAME'.  Returns a TEMPLATE_DECL if the name
   can be resolved or an UNBOUND_CLASS_TEMPLATE, unless an error occurs,
   in which case error_mark_node is returned.  If PARM_LIST is non-NULL,
   also make sure that the template parameter list of TEMPLATE_DECL matches.
   If COMPLAIN zero, don't complain about any errors that occur.  */

tree
make_unbound_class_template (tree context, tree name, tree parm_list,
			     tsubst_flags_t complain)
{
  tree t;
  tree d;

  if (TYPE_P (name))
    name = TYPE_IDENTIFIER (name);
  else if (DECL_P (name))
    name = DECL_NAME (name);
  gcc_assert (identifier_p (name));

  /* For a non-dependent (or currently open) context we can resolve the
     member template right now.  */
  if (!dependent_type_p (context)
      || currently_open_class (context))
    {
      tree tmpl = NULL_TREE;

      if (MAYBE_CLASS_TYPE_P (context))
	tmpl = lookup_field (context, name, 0, false);

      if (tmpl && TREE_CODE (tmpl) == TYPE_DECL)
	tmpl = maybe_get_template_decl_from_type_decl (tmpl);

      if (!tmpl || !DECL_TYPE_TEMPLATE_P (tmpl))
	{
	  if (complain & tf_error)
	    error ("no class template named %q#T in %q#T", name, context);
	  return error_mark_node;
	}

      if (parm_list
	  && !comp_template_parms (DECL_TEMPLATE_PARMS (tmpl), parm_list))
	{
	  if (complain & tf_error)
	    {
	      error ("template parameters do not match template %qD", tmpl);
	      inform (DECL_SOURCE_LOCATION (tmpl),
		      "%qD declared here", tmpl);
	    }
	  return error_mark_node;
	}

      if (!perform_or_defer_access_check (TYPE_BINFO (context), tmpl, tmpl,
					  complain))
	return error_mark_node;

      return tmpl;
    }

  /* Build the UNBOUND_CLASS_TEMPLATE.  */
  t = cxx_make_type (UNBOUND_CLASS_TEMPLATE);
  TYPE_CONTEXT (t) = FROB_CONTEXT (context);
  TREE_TYPE (t) = NULL_TREE;
  SET_TYPE_STRUCTURAL_EQUALITY (t);

  /* Build the corresponding TEMPLATE_DECL.  */
  d = build_decl (input_location, TEMPLATE_DECL, name, t);
  TYPE_NAME (TREE_TYPE (d)) = d;
  TYPE_STUB_DECL (TREE_TYPE (d)) = d;
  DECL_CONTEXT (d) = FROB_CONTEXT (context);
  DECL_ARTIFICIAL (d) = 1;
  DECL_TEMPLATE_PARMS (d) = parm_list;

  return t;
}

/* Push the declarations of builtin types into the global namespace.
   RID_INDEX is the index of the builtin type in the array
   RID_POINTERS.  NAME is the name used when looking up the builtin
   type.  TYPE is the _TYPE node for the builtin type.

   The calls to set_global_binding below should be eliminated.
   Built-in types should not be looked up by name; their names are
   keywords that the parser can recognize.  However, there is code in
   c-common.c that uses identifier_global_value to look up built-in
   types by name.  */

void
record_builtin_type (enum rid rid_index,
		     const char* name,
		     tree type)
{
  tree decl = NULL_TREE;

  /* Bind TYPE under NAME, if a name was given.  */
  if (name)
    {
      tree tname = get_identifier (name);
      tree tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL, tname, type);
      DECL_ARTIFICIAL (tdecl) = 1;
      set_global_binding (tdecl);
      decl = tdecl;
    }

  /* Additionally bind TYPE under its keyword spelling, when that
     differs from NAME (so a type may end up with two bindings).  */
  if ((int) rid_index < (int) RID_MAX)
    if (tree rname = ridpointers[(int) rid_index])
      if (!decl || DECL_NAME (decl) != rname)
	{
	  tree rdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL, rname, type);
	  DECL_ARTIFICIAL (rdecl) = 1;
	  set_global_binding (rdecl);
	  if (!decl)
	    decl = rdecl;
	}

  if (decl)
    {
      if (!TYPE_NAME (type))
	TYPE_NAME (type) = decl;
      debug_hooks->type_decl (decl, 0);
    }
}

/* Push a type into the namespace so that the back ends ignore it.  */

static void
record_unknown_type (tree type, const char* name)
{
  tree decl = pushdecl (build_decl (UNKNOWN_LOCATION,
				    TYPE_DECL, get_identifier (name), type));
  /* Make sure the "unknown type" typedecl gets ignored for debug info.  */
  DECL_IGNORED_P (decl) = 1;
  TYPE_DECL_SUPPRESS_DEBUG (decl) = 1;
  /* Give the placeholder the layout of void so the back ends never try
     to allocate storage for it.  */
  TYPE_SIZE (type) = TYPE_SIZE (void_type_node);
  SET_TYPE_ALIGN (type, 1);
  TYPE_USER_ALIGN (type) = 0;
  SET_TYPE_MODE (type, TYPE_MODE (void_type_node));
}

/* Create all the predefined identifiers.
   */

static void
initialize_predefined_identifiers (void)
{
  struct predefined_identifier
  {
    const char *name; /* Name.  */
    tree *node;  /* Node to store it in.  */
    cp_identifier_kind kind;  /* Kind of identifier.  */
  };

  /* A table of identifiers to create at startup.  */
  static const predefined_identifier predefined_identifiers[] = {
    {"C++", &lang_name_cplusplus, cik_normal},
    {"C", &lang_name_c, cik_normal},
    /* Some of these names have a trailing space so that it is
       impossible for them to conflict with names written by users.  */
    {"__ct ", &ctor_identifier, cik_ctor},
    {"__ct_base ", &base_ctor_identifier, cik_ctor},
    {"__ct_comp ", &complete_ctor_identifier, cik_ctor},
    {"__dt ", &dtor_identifier, cik_dtor},
    {"__dt_base ", &base_dtor_identifier, cik_dtor},
    {"__dt_comp ", &complete_dtor_identifier, cik_dtor},
    {"__dt_del ", &deleting_dtor_identifier, cik_dtor},
    {"__conv_op ", &conv_op_identifier, cik_conv_op},
    {"__in_chrg", &in_charge_identifier, cik_normal},
    {"this", &this_identifier, cik_normal},
    {"__delta", &delta_identifier, cik_normal},
    {"__pfn", &pfn_identifier, cik_normal},
    {"_vptr", &vptr_identifier, cik_normal},
    {"__vtt_parm", &vtt_parm_identifier, cik_normal},
    {"::", &global_identifier, cik_normal},
    /* The demangler expects anonymous namespaces to be called
       something starting with '_GLOBAL__N_'.  It no longer needs to be
       unique to the TU.  */
    {"_GLOBAL__N_1", &anon_identifier, cik_normal},
    {"auto", &auto_identifier, cik_normal},
    {"decltype(auto)", &decltype_auto_identifier, cik_normal},
    {"initializer_list", &init_list_identifier, cik_normal},
    {"__for_range ", &for_range__identifier, cik_normal},
    {"__for_begin ", &for_begin__identifier, cik_normal},
    {"__for_end ", &for_end__identifier, cik_normal},
    {"__for_range", &for_range_identifier, cik_normal},
    {"__for_begin", &for_begin_identifier, cik_normal},
    {"__for_end", &for_end_identifier, cik_normal},
    {"abi_tag", &abi_tag_identifier, cik_normal},
    {"aligned", &aligned_identifier, cik_normal},
    {"begin", &begin_identifier, cik_normal},
    {"end", &end_identifier, cik_normal},
    {"get", &get__identifier, cik_normal},
    {"gnu", &gnu_identifier, cik_normal},
    {"tuple_element", &tuple_element_identifier, cik_normal},
    {"tuple_size", &tuple_size_identifier, cik_normal},
    {"type", &type_identifier, cik_normal},
    {"value", &value_identifier, cik_normal},
    {"_FUN", &fun_identifier, cik_normal},
    {"__closure", &closure_identifier, cik_normal},
    {"heap uninit", &heap_uninit_identifier, cik_normal},
    {"heap ", &heap_identifier, cik_normal},
    {"heap deleted", &heap_deleted_identifier, cik_normal},
    {NULL, NULL, cik_normal}
  };

  /* Intern every entry and store it in its global; the table is
     NULL-name terminated.  */
  for (const predefined_identifier *pid = predefined_identifiers;
       pid->name; ++pid)
    {
      *pid->node = get_identifier (pid->name);
      /* Some of these identifiers already have a special kind.  */
      if (pid->kind != cik_normal)
	set_identifier_kind (*pid->node, pid->kind);
    }
}

/* Create the predefined scalar types of C, and some nodes representing
   standard constants (0, 1, (void *)0).  Initialize the global binding
   level.  Make definitions for built-in primitive functions.

   NOTE: the order of the initializations below matters; many later
   steps consume globals set up by earlier ones.  */

void
cxx_init_decl_processing (void)
{
  tree void_ftype;
  tree void_ftype_ptr;

  /* Create all the identifiers we need.  */
  initialize_predefined_identifiers ();

  /* Create the global variables.  */
  push_to_top_level ();

  current_function_decl = NULL_TREE;
  current_binding_level = NULL;
  /* Enter the global namespace.  */
  gcc_assert (global_namespace == NULL_TREE);
  global_namespace = build_lang_decl (NAMESPACE_DECL, global_identifier,
				      void_type_node);
  TREE_PUBLIC (global_namespace) = 1;
  DECL_CONTEXT (global_namespace)
    = build_translation_unit_decl (get_identifier (main_input_filename));
  /* Remember whether we want the empty class passing ABI change warning
     in this TU.  */
  TRANSLATION_UNIT_WARN_EMPTY_P (DECL_CONTEXT (global_namespace))
    = warn_abi && abi_version_crosses (12);
  debug_hooks->register_main_translation_unit
    (DECL_CONTEXT (global_namespace));
  begin_scope (sk_namespace, global_namespace);
  current_namespace = global_namespace;

  if (flag_visibility_ms_compat)
    default_visibility = VISIBILITY_HIDDEN;

  /* Initially, C.  */
  current_lang_name = lang_name_c;

  /* Create the `std' namespace.  */
  push_namespace (get_identifier ("std"));
  std_node = current_namespace;
  pop_namespace ();

  flag_noexcept_type = (cxx_dialect >= cxx17);

  c_common_nodes_and_builtins ();

  /* Register the two C++ front-end-only builtins.  Both are marked
     const/nothrow/leaf so calls to them can be freely optimized.  */
  tree bool_ftype = build_function_type_list (boolean_type_node, NULL_TREE);
  tree decl
    = add_builtin_function ("__builtin_is_constant_evaluated",
			    bool_ftype, CP_BUILT_IN_IS_CONSTANT_EVALUATED,
			    BUILT_IN_FRONTEND, NULL, NULL_TREE);
  set_call_expr_flags (decl, ECF_CONST | ECF_NOTHROW | ECF_LEAF);

  tree cptr_ftype = build_function_type_list (const_ptr_type_node, NULL_TREE);
  decl = add_builtin_function ("__builtin_source_location",
			       cptr_ftype, CP_BUILT_IN_SOURCE_LOCATION,
			       BUILT_IN_FRONTEND, NULL, NULL_TREE);
  set_call_expr_flags (decl, ECF_CONST | ECF_NOTHROW | ECF_LEAF);

  integer_two_node = build_int_cst (NULL_TREE, 2);

  /* Guess at the initial static decls size.  */
  vec_alloc (static_decls, 500);

  /* ... and keyed classes.  */
  vec_alloc (keyed_classes, 100);

  record_builtin_type (RID_BOOL, "bool", boolean_type_node);
  truthvalue_type_node = boolean_type_node;
  truthvalue_false_node = boolean_false_node;
  truthvalue_true_node = boolean_true_node;

  /* Canonical exception-specification lists, shared by reference.  */
  empty_except_spec = build_tree_list (NULL_TREE, NULL_TREE);
  noexcept_true_spec = build_tree_list (boolean_true_node, NULL_TREE);
  noexcept_false_spec = build_tree_list (boolean_false_node, NULL_TREE);
  noexcept_deferred_spec = build_tree_list (make_node (DEFERRED_NOEXCEPT),
					    NULL_TREE);

#if 0
  record_builtin_type (RID_MAX, NULL, string_type_node);
#endif

  delta_type_node = ptrdiff_type_node;
  vtable_index_type = ptrdiff_type_node;

  vtt_parm_type = build_pointer_type (const_ptr_type_node);
  void_ftype = build_function_type_list (void_type_node, NULL_TREE);
  void_ftype_ptr = build_function_type_list (void_type_node,
					     ptr_type_node, NULL_TREE);
  void_ftype_ptr
    = build_exception_variant (void_ftype_ptr, empty_except_spec);

  /* Create the conversion operator marker.  This operator's DECL_NAME
     is in the identifier table, so we can use identifier equality to
     find it.  */
  conv_op_marker = build_lang_decl (FUNCTION_DECL, conv_op_identifier,
				    void_ftype);

  /* C++ extensions */

  unknown_type_node = make_node (LANG_TYPE);
  record_unknown_type (unknown_type_node, "unknown type");

  /* Indirecting an UNKNOWN_TYPE node yields an UNKNOWN_TYPE node.  */
  TREE_TYPE (unknown_type_node) = unknown_type_node;

  /* Looking up TYPE_POINTER_TO and TYPE_REFERENCE_TO yield the same
     result.  */
  TYPE_POINTER_TO (unknown_type_node) = unknown_type_node;
  TYPE_REFERENCE_TO (unknown_type_node) = unknown_type_node;

  init_list_type_node = make_node (LANG_TYPE);
  record_unknown_type (init_list_type_node, "init list");

  {
    /* Make sure we get a unique function type, so we can give
       its pointer type a name.  (This wins for gdb.)  */
    tree vfunc_type = make_node (FUNCTION_TYPE);
    TREE_TYPE (vfunc_type) = integer_type_node;
    TYPE_ARG_TYPES (vfunc_type) = NULL_TREE;
    layout_type (vfunc_type);

    vtable_entry_type = build_pointer_type (vfunc_type);
  }
  record_builtin_type (RID_MAX, "__vtbl_ptr_type", vtable_entry_type);

  vtbl_type_node
    = build_cplus_array_type (vtable_entry_type, NULL_TREE);
  layout_type (vtbl_type_node);
  vtbl_type_node = cp_build_qualified_type (vtbl_type_node, TYPE_QUAL_CONST);
  record_builtin_type (RID_MAX, NULL, vtbl_type_node);
  vtbl_ptr_type_node = build_pointer_type (vtable_entry_type);
  layout_type (vtbl_ptr_type_node);
  record_builtin_type (RID_MAX, NULL, vtbl_ptr_type_node);

  /* The ABI support namespace, used e.g. by RTTI.  */
  push_namespace (get_identifier ("__cxxabiv1"));
  abi_node = current_namespace;
  pop_namespace ();

  global_type_node = make_node (LANG_TYPE);
  record_unknown_type (global_type_node, "global type");

  any_targ_node = make_node (LANG_TYPE);
  record_unknown_type (any_targ_node, "any type");

  /* Now, C++.  */
  current_lang_name = lang_name_cplusplus;

  /* Normalize -faligned-new: reject non-power-of-two values, apply the
     dialect-dependent default for -1, and expand the "1" shorthand to
     the malloc alignment in bytes.  */
  if (aligned_new_threshold > 1
      && !pow2p_hwi (aligned_new_threshold))
    {
      error ("%<-faligned-new=%d%> is not a power of two",
	     aligned_new_threshold);
      aligned_new_threshold = 1;
    }
  if (aligned_new_threshold == -1)
    aligned_new_threshold = (cxx_dialect >= cxx17) ? 1 : 0;
  if (aligned_new_threshold == 1)
    aligned_new_threshold = malloc_alignment () / BITS_PER_UNIT;

  {
    tree newattrs, extvisattr;
    tree newtype, deltype;
    tree ptr_ftype_sizetype;
    tree new_eh_spec;

    ptr_ftype_sizetype
      = build_function_type_list (ptr_type_node, size_type_node, NULL_TREE);
    if (cxx_dialect == cxx98)
      {
	tree bad_alloc_id;
	tree bad_alloc_type_node;
	tree bad_alloc_decl;

	/* In C++98, operator new is declared throw(std::bad_alloc);
	   forward-declare std::bad_alloc for that specification.  */
	push_nested_namespace (std_node);
	bad_alloc_id = get_identifier ("bad_alloc");
	bad_alloc_type_node = make_class_type (RECORD_TYPE);
	TYPE_CONTEXT (bad_alloc_type_node) = current_namespace;
	bad_alloc_decl
	  = create_implicit_typedef (bad_alloc_id, bad_alloc_type_node);
	DECL_CONTEXT (bad_alloc_decl) = current_namespace;
	pop_nested_namespace (std_node);

	new_eh_spec
	  = add_exception_specifier (NULL_TREE, bad_alloc_type_node, -1);
      }
    else
      new_eh_spec = noexcept_false_spec;

    /* Ensure attribs.c is initialized.  */
    init_attributes ();

    extvisattr = build_tree_list (get_identifier ("externally_visible"),
				  NULL_TREE);
    newattrs = tree_cons (get_identifier ("alloc_size"),
			  build_tree_list (NULL_TREE, integer_one_node),
			  extvisattr);
    newtype = cp_build_type_attribute_variant (ptr_ftype_sizetype, newattrs);
    newtype = build_exception_variant (newtype, new_eh_spec);
    deltype = cp_build_type_attribute_variant (void_ftype_ptr, extvisattr);
    deltype = build_exception_variant (deltype, empty_except_spec);

    /* Implicitly declare the replaceable global allocation functions:
       operator new/new[]/delete/delete[].  */
    tree opnew = push_cp_library_fn (NEW_EXPR, newtype, 0);
    DECL_IS_MALLOC (opnew) = 1;
    DECL_SET_IS_OPERATOR_NEW (opnew, true);
    DECL_IS_REPLACEABLE_OPERATOR (opnew) = 1;
    opnew = push_cp_library_fn (VEC_NEW_EXPR, newtype, 0);
    DECL_IS_MALLOC (opnew) = 1;
    DECL_SET_IS_OPERATOR_NEW (opnew, true);
    DECL_IS_REPLACEABLE_OPERATOR (opnew) = 1;
    tree opdel = push_cp_library_fn (DELETE_EXPR, deltype, ECF_NOTHROW);
    DECL_SET_IS_OPERATOR_DELETE (opdel, true);
    DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
    opdel = push_cp_library_fn (VEC_DELETE_EXPR, deltype, ECF_NOTHROW);
    DECL_SET_IS_OPERATOR_DELETE (opdel, true);
    DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
    if (flag_sized_deallocation)
      {
	/* Also push the sized deallocation variants:
	     void operator delete(void*, std::size_t) throw();
	     void operator delete[](void*, std::size_t) throw();  */
	tree void_ftype_ptr_size
	  = build_function_type_list (void_type_node, ptr_type_node,
				      size_type_node, NULL_TREE);
	deltype = cp_build_type_attribute_variant (void_ftype_ptr_size,
						   extvisattr);
	deltype = build_exception_variant (deltype, empty_except_spec);
	opdel = push_cp_library_fn (DELETE_EXPR, deltype, ECF_NOTHROW);
	DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
	opdel = push_cp_library_fn (VEC_DELETE_EXPR, deltype, ECF_NOTHROW);
	DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
      }

    if (aligned_new_threshold)
      {
	/* Declare std::align_val_t and the aligned allocation
	   overloads (C++17 over-aligned new/delete).  */
	push_nested_namespace (std_node);
	tree align_id = get_identifier ("align_val_t");
	align_type_node = start_enum (align_id, NULL_TREE, size_type_node,
				      NULL_TREE, /*scoped*/true, NULL);
	pop_nested_namespace (std_node);

	/* operator new (size_t, align_val_t); */
	newtype = build_function_type_list (ptr_type_node, size_type_node,
					    align_type_node, NULL_TREE);
	newtype = cp_build_type_attribute_variant (newtype, newattrs);
	newtype = build_exception_variant (newtype, new_eh_spec);
	opnew = push_cp_library_fn (NEW_EXPR, newtype, 0);
	DECL_IS_MALLOC (opnew) = 1;
	DECL_SET_IS_OPERATOR_NEW (opnew, true);
	DECL_IS_REPLACEABLE_OPERATOR (opnew) = 1;
	opnew = push_cp_library_fn (VEC_NEW_EXPR, newtype, 0);
	DECL_IS_MALLOC (opnew) = 1;
	DECL_SET_IS_OPERATOR_NEW (opnew, true);
	DECL_IS_REPLACEABLE_OPERATOR (opnew) = 1;

	/* operator delete (void *, align_val_t); */
	deltype = build_function_type_list (void_type_node, ptr_type_node,
					    align_type_node, NULL_TREE);
	deltype = cp_build_type_attribute_variant (deltype, extvisattr);
	deltype = build_exception_variant (deltype, empty_except_spec);
	opdel = push_cp_library_fn (DELETE_EXPR, deltype, ECF_NOTHROW);
	DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
	opdel = push_cp_library_fn (VEC_DELETE_EXPR, deltype, ECF_NOTHROW);
	DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;

	if (flag_sized_deallocation)
	  {
	    /* operator delete (void *, size_t, align_val_t); */
	    deltype = build_function_type_list (void_type_node, ptr_type_node,
						size_type_node,
						align_type_node, NULL_TREE);
	    deltype = cp_build_type_attribute_variant (deltype, extvisattr);
	    deltype = build_exception_variant (deltype, empty_except_spec);
	    opdel = push_cp_library_fn (DELETE_EXPR, deltype, ECF_NOTHROW);
	    DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	    DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
	    opdel = push_cp_library_fn (VEC_DELETE_EXPR, deltype,
					ECF_NOTHROW);
	    DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	    DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
	  }
      }

    /* Set up decltype(nullptr), laid out like a pointer.  */
    nullptr_type_node = make_node (NULLPTR_TYPE);
    TYPE_SIZE (nullptr_type_node) = bitsize_int (GET_MODE_BITSIZE (ptr_mode));
    TYPE_SIZE_UNIT (nullptr_type_node) = size_int (GET_MODE_SIZE (ptr_mode));
    TYPE_UNSIGNED (nullptr_type_node) = 1;
    TYPE_PRECISION (nullptr_type_node) = GET_MODE_BITSIZE (ptr_mode);
    if (abi_version_at_least (9))
      SET_TYPE_ALIGN (nullptr_type_node, GET_MODE_ALIGNMENT (ptr_mode));
    SET_TYPE_MODE (nullptr_type_node, ptr_mode);
    record_builtin_type (RID_MAX, "decltype(nullptr)", nullptr_type_node);
    nullptr_node = build_int_cst (nullptr_type_node, 0);
  }

  abort_fndecl
    = build_library_fn_ptr ("__cxa_pure_virtual", void_ftype,
			    ECF_NORETURN | ECF_NOTHROW | ECF_COLD);

  /* Perform other language dependent initializations.  */
  init_class_processing ();
  init_rtti_processing ();
  init_template_processing ();

  if (flag_exceptions)
    init_exception_processing ();

  if (! supports_one_only ())
    flag_weak = 0;

  make_fname_decl = cp_make_fname_decl;
  start_fname_decls ();

  /* Show we use EH for cleanups.  */
  if (flag_exceptions)
    using_eh_for_cleanups ();
}

/* Generate an initializer for a function naming variable from
   NAME.  NAME may be NULL, to indicate a dependent name.
   TYPE_P is filled in with the type of the init.  */

tree
cp_fname_init (const char* name, tree *type_p)
{
  tree domain = NULL_TREE;
  tree type;
  tree init = NULL_TREE;
  size_t length = 0;

  if (name)
    {
      length = strlen (name);
      domain = build_index_type (size_int (length));
      init = build_string (length + 1, name);
    }

  /* The variable has type `const char[]'; with a NULL name the array
     bound stays incomplete (DOMAIN is NULL_TREE).  */
  type = cp_build_qualified_type (char_type_node, TYPE_QUAL_CONST);
  type = build_cplus_array_type (type, domain);

  *type_p = type;

  if (init)
    TREE_TYPE (init) = type;
  else
    /* A dependent name has no initializer yet.  */
    init = error_mark_node;

  return init;
}

/* Create the VAR_DECL for __FUNCTION__ etc.  ID is the name to give
   the decl, LOC is the location to give the decl, NAME is the
   initialization string and TYPE_DEP indicates whether NAME depended
   on the type of the function (1 for __PRETTY_FUNCTION__, 0 for
   __FUNCTION__).  We make use of that to detect __PRETTY_FUNCTION__
   inside a template fn.  This is being done lazily at the point of
   first use, so we mustn't push the decl now.  */

static tree
cp_make_fname_decl (location_t loc, tree id, int type_dep)
{
  const char * name = NULL;
  bool release_name = false;
  /* Inside a template function, a type-dependent name stays NULL until
     instantiation.  */
  if (!(type_dep && in_template_function ()))
    {
      if (current_function_decl == NULL_TREE)
	name = "top level";
      else if (type_dep == 1) /* __PRETTY_FUNCTION__ */
	name = cxx_printable_name (current_function_decl, 2);
      else if (type_dep == 0) /* __FUNCTION__ */
	{
	  name = fname_as_string (type_dep);
	  release_name = true;
	}
      else
	gcc_unreachable ();
    }
  tree type;
  tree init = cp_fname_init (name, &type);
  tree decl = build_decl (loc, VAR_DECL, id, type);

  /* fname_as_string allocated; the string has been copied into INIT.  */
  if (release_name)
    free (CONST_CAST (char *, name));

  /* As we're using pushdecl_with_scope, we must set the context.  */
  DECL_CONTEXT (decl) = current_function_decl;

  TREE_READONLY (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_DECLARED_CONSTEXPR_P (decl) = 1;
  TREE_STATIC (decl) = 1;

  TREE_USED (decl) = 1;

  if (init)
    {
      SET_DECL_VALUE_EXPR (decl, init);
      DECL_HAS_VALUE_EXPR_P (decl) = 1;
      /* For decl_constant_var_p.  */
      DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = 1;
    }

  if (current_function_decl)
    {
      DECL_CONTEXT (decl) = current_function_decl;
      decl = pushdecl_outermost_localscope (decl);
      if (decl != error_mark_node)
	add_decl_expr (decl);
    }
  else
    {
      DECL_THIS_STATIC (decl) = true;
      pushdecl_top_level_and_finish (decl, NULL_TREE);
    }

  return decl;
}

/* Install DECL as a builtin function at current (global) scope.
   Return the new decl (if we found an existing version).  Also
   installs it into ::std, if it's not '_*'.  */

tree
cxx_builtin_function (tree decl)
{
  retrofit_lang_decl (decl);

  DECL_ARTIFICIAL (decl) = 1;
  SET_DECL_LANGUAGE (decl, lang_c);
  /* Runtime library routines are, by definition, available in an
     external shared object.  */
  DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
  DECL_VISIBILITY_SPECIFIED (decl) = 1;

  tree id = DECL_NAME (decl);
  const char *name = IDENTIFIER_POINTER (id);
  if (name[0] != '_' || name[1] != '_')
    /* In the user's namespace, it must be declared before use.  */
    DECL_ANTICIPATED (decl) = 1;
  else if (IDENTIFIER_LENGTH (id) > strlen ("___chk")
	   && 0 != strncmp (name + 2, "builtin_", strlen ("builtin_"))
	   && 0 == memcmp (name + IDENTIFIER_LENGTH (id) - strlen ("_chk"),
			   "_chk", strlen ("_chk") + 1))
    /* Treat __*_chk fortification functions as anticipated as well,
       unless they are __builtin_*_chk.  */
    DECL_ANTICIPATED (decl) = 1;

  /* All builtins that don't begin with an '_' should additionally
     go in the 'std' namespace.  */
  if (name[0] != '_')
    {
      /* A copy is pushed into std so the global decl below keeps its
	 own context.  */
      tree std_decl = copy_decl (decl);

      push_nested_namespace (std_node);
      DECL_CONTEXT (std_decl) = FROB_CONTEXT (std_node);
      pushdecl (std_decl);
      pop_nested_namespace (std_node);
    }

  DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace);
  decl = pushdecl (decl);

  return decl;
}

/* Like cxx_builtin_function, but guarantee the function is added to the global
   scope.
This is to allow function specific options to add new machine dependent builtins when the target ISA changes via attribute((target(...))) which saves space on program startup if the program does not use non-generic ISAs. */ tree cxx_builtin_function_ext_scope (tree decl) { push_nested_namespace (global_namespace); decl = cxx_builtin_function (decl); pop_nested_namespace (global_namespace); return decl; } /* Implement LANG_HOOKS_SIMULATE_BUILTIN_FUNCTION_DECL. */ tree cxx_simulate_builtin_function_decl (tree decl) { retrofit_lang_decl (decl); DECL_ARTIFICIAL (decl) = 1; SET_DECL_LANGUAGE (decl, lang_cplusplus); DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace); return pushdecl (decl); } /* Generate a FUNCTION_DECL with the typical flags for a runtime library function. Not called directly. */ static tree build_library_fn (tree name, enum tree_code operator_code, tree type, int ecf_flags) { tree fn = build_lang_decl (FUNCTION_DECL, name, type); DECL_EXTERNAL (fn) = 1; TREE_PUBLIC (fn) = 1; DECL_ARTIFICIAL (fn) = 1; DECL_OVERLOADED_OPERATOR_CODE_RAW (fn) = OVL_OP_INFO (false, operator_code)->ovl_op_code; SET_DECL_LANGUAGE (fn, lang_c); /* Runtime library routines are, by definition, available in an external shared object. */ DECL_VISIBILITY (fn) = VISIBILITY_DEFAULT; DECL_VISIBILITY_SPECIFIED (fn) = 1; set_call_expr_flags (fn, ecf_flags); return fn; } /* Returns the _DECL for a library function with C++ linkage. */ static tree build_cp_library_fn (tree name, enum tree_code operator_code, tree type, int ecf_flags) { tree fn = build_library_fn (name, operator_code, type, ecf_flags); DECL_CONTEXT (fn) = FROB_CONTEXT (current_namespace); SET_DECL_LANGUAGE (fn, lang_cplusplus); return fn; } /* Like build_library_fn, but takes a C string instead of an IDENTIFIER_NODE. 
*/ tree build_library_fn_ptr (const char* name, tree type, int ecf_flags) { return build_library_fn (get_identifier (name), ERROR_MARK, type, ecf_flags); } /* Like build_cp_library_fn, but takes a C string instead of an IDENTIFIER_NODE. */ tree build_cp_library_fn_ptr (const char* name, tree type, int ecf_flags) { return build_cp_library_fn (get_identifier (name), ERROR_MARK, type, ecf_flags); } /* Like build_library_fn, but also pushes the function so that we will be able to find it via get_global_binding. Also, the function may throw exceptions listed in RAISES. */ tree push_library_fn (tree name, tree type, tree raises, int ecf_flags) { tree fn; if (raises) type = build_exception_variant (type, raises); fn = build_library_fn (name, ERROR_MARK, type, ecf_flags); pushdecl_top_level (fn); return fn; } /* Like build_cp_library_fn, but also pushes the function so that it will be found by normal lookup. */ static tree push_cp_library_fn (enum tree_code operator_code, tree type, int ecf_flags) { tree fn = build_cp_library_fn (ovl_op_identifier (false, operator_code), operator_code, type, ecf_flags); pushdecl (fn); if (flag_tm) apply_tm_attr (fn, get_identifier ("transaction_safe")); return fn; } /* Like push_library_fn, but takes a TREE_LIST of parm types rather than a FUNCTION_TYPE. */ tree push_void_library_fn (tree name, tree parmtypes, int ecf_flags) { tree type = build_function_type (void_type_node, parmtypes); return push_library_fn (name, type, NULL_TREE, ecf_flags); } /* Like push_library_fn, but also note that this function throws and does not return. Used for __throw_foo and the like. */ tree push_throw_library_fn (tree name, tree type) { tree fn = push_library_fn (name, type, NULL_TREE, ECF_NORETURN | ECF_COLD); return fn; } /* When we call finish_struct for an anonymous union, we create default copy constructors and such. But, an anonymous union shouldn't have such things; this function undoes the damage to the anonymous union type T. 
(The reason that we create the synthesized methods is that we don't distinguish `union { int i; }' from `typedef union { int i; } U'. The first is an anonymous union; the second is just an ordinary union type.) */ void fixup_anonymous_aggr (tree t) { /* Wipe out memory of synthesized methods. */ TYPE_HAS_USER_CONSTRUCTOR (t) = 0; TYPE_HAS_DEFAULT_CONSTRUCTOR (t) = 0; TYPE_HAS_COPY_CTOR (t) = 0; TYPE_HAS_CONST_COPY_CTOR (t) = 0; TYPE_HAS_COPY_ASSIGN (t) = 0; TYPE_HAS_CONST_COPY_ASSIGN (t) = 0; /* Splice the implicitly generated functions out of TYPE_FIELDS. */ for (tree probe, *prev_p = &TYPE_FIELDS (t); (probe = *prev_p);) if (TREE_CODE (probe) == FUNCTION_DECL && DECL_ARTIFICIAL (probe)) *prev_p = DECL_CHAIN (probe); else prev_p = &DECL_CHAIN (probe); /* Anonymous aggregates cannot have fields with ctors, dtors or complex assignment operators (because they cannot have these methods themselves). For anonymous unions this is already checked because they are not allowed in any union, otherwise we have to check it. */ if (TREE_CODE (t) != UNION_TYPE) { tree field, type; for (field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) { type = TREE_TYPE (field); if (CLASS_TYPE_P (type)) { if (TYPE_NEEDS_CONSTRUCTING (type)) error ("member %q+#D with constructor not allowed " "in anonymous aggregate", field); if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)) error ("member %q+#D with destructor not allowed " "in anonymous aggregate", field); if (TYPE_HAS_COMPLEX_COPY_ASSIGN (type)) error ("member %q+#D with copy assignment operator " "not allowed in anonymous aggregate", field); } } } } /* Warn for an attribute located at LOCATION that appertains to the class type CLASS_TYPE that has not been properly placed after its class-key, in it class-specifier. 
   */

void
warn_misplaced_attr_for_class_type (location_t location,
				    tree class_type)
{
  gcc_assert (OVERLOAD_TYPE_P (class_type));

  /* Group the warning with its follow-up note for diagnostics
     consumers.  */
  auto_diagnostic_group d;
  if (warning_at (location, OPT_Wattributes,
		  "attribute ignored in declaration "
		  "of %q#T", class_type))
    inform (location,
	    "attribute for %q#T must follow the %qs keyword",
	    class_type, class_key_or_enum_as_string (class_type));
}

/* Returns the cv-qualifiers that apply to the type specified
   by the DECLSPECS.  */

static int
get_type_quals (const cp_decl_specifier_seq *declspecs)
{
  int type_quals = TYPE_UNQUALIFIED;

  if (decl_spec_seq_has_spec_p (declspecs, ds_const))
    type_quals |= TYPE_QUAL_CONST;
  if (decl_spec_seq_has_spec_p (declspecs, ds_volatile))
    type_quals |= TYPE_QUAL_VOLATILE;
  if (decl_spec_seq_has_spec_p (declspecs, ds_restrict))
    type_quals |= TYPE_QUAL_RESTRICT;

  return type_quals;
}

/* Make sure that a declaration with no declarator is well-formed, i.e.
   just declares a tagged type or anonymous union.

   Returns the type declared; or NULL_TREE if none.  */

tree
check_tag_decl (cp_decl_specifier_seq *declspecs,
		bool explicit_type_instantiation_p)
{
  int saw_friend = decl_spec_seq_has_spec_p (declspecs, ds_friend);
  int saw_typedef = decl_spec_seq_has_spec_p (declspecs, ds_typedef);
  /* If a class, struct, or enum type is declared by the DECLSPECS
     (i.e, if a class-specifier, enum-specifier, or non-typename
     elaborated-type-specifier appears in the DECLSPECS),
     DECLARED_TYPE is set to the corresponding type.  */
  tree declared_type = NULL_TREE;
  bool error_p = false;

  if (declspecs->multiple_types_p)
    error_at (smallest_type_location (declspecs),
	      "multiple types in one declaration");
  else if (declspecs->redefined_builtin_type)
    {
      location_t loc = declspecs->locations[ds_redefined_builtin_type_spec];
      if (!in_system_header_at (loc))
	permerror (loc, "redeclaration of C++ built-in type %qT",
		   declspecs->redefined_builtin_type);
      return NULL_TREE;
    }

  if (declspecs->type
      && TYPE_P (declspecs->type)
      && ((TREE_CODE (declspecs->type) != TYPENAME_TYPE
	   && MAYBE_CLASS_TYPE_P (declspecs->type))
	  || TREE_CODE (declspecs->type) == ENUMERAL_TYPE))
    declared_type = declspecs->type;
  else if (declspecs->type == error_mark_node)
    error_p = true;

  /* `auto' makes no sense without a declarator.  */
  if (type_uses_auto (declared_type))
    {
      error_at (declspecs->locations[ds_type_spec],
		"%<auto%> can only be specified for variables "
		"or function declarations");
      return error_mark_node;
    }

  if (declared_type && !OVERLOAD_TYPE_P (declared_type))
    declared_type = NULL_TREE;

  if (!declared_type && !saw_friend && !error_p)
    permerror (input_location, "declaration does not declare anything");
  /* Check for an anonymous union.  */
  else if (declared_type && RECORD_OR_UNION_CODE_P (TREE_CODE (declared_type))
	   && TYPE_UNNAMED_P (declared_type))
    {
      /* 7/3 In a simple-declaration, the optional init-declarator-list
	 can be omitted only when declaring a class (clause 9) or
	 enumeration (7.2), that is, when the decl-specifier-seq contains
	 either a class-specifier, an elaborated-type-specifier with
	 a class-key (9.1), or an enum-specifier.  In these cases and
	 whenever a class-specifier or enum-specifier is present in the
	 decl-specifier-seq, the identifiers in these specifiers are among
	 the names being declared by the declaration (as class-name,
	 enum-names, or enumerators, depending on the syntax).  In such
	 cases, and except for the declaration of an unnamed bit-field (9.6),
	 the decl-specifier-seq shall introduce one or more names into the
	 program, or shall redeclare a name introduced by a previous
	 declaration.  [Example:
	     enum { };			// ill-formed
	     typedef class { };		// ill-formed
	 --end example]  */
      if (saw_typedef)
	{
	  error_at (declspecs->locations[ds_typedef],
		    "missing type-name in typedef-declaration");
	  return NULL_TREE;
	}
      /* Anonymous unions are objects, so they can have specifiers.  */;
      SET_ANON_AGGR_TYPE_P (declared_type);

      if (TREE_CODE (declared_type) != UNION_TYPE)
	pedwarn (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (declared_type)),
		 OPT_Wpedantic, "ISO C++ prohibits anonymous structs");
    }

  else
    {
      /* Diagnose stray specifiers on a pure type declaration; at most
	 one diagnostic is given, in the order below.  */
      if (decl_spec_seq_has_spec_p (declspecs, ds_inline))
	error_at (declspecs->locations[ds_inline],
		  "%<inline%> can only be specified for functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_virtual))
	error_at (declspecs->locations[ds_virtual],
		  "%<virtual%> can only be specified for functions");
      else if (saw_friend
	       && (!current_class_type
		   || current_scope () != current_class_type))
	error_at (declspecs->locations[ds_friend],
		  "%<friend%> can only be specified inside a class");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_explicit))
	error_at (declspecs->locations[ds_explicit],
		  "%<explicit%> can only be specified for constructors");
      else if (declspecs->storage_class)
	error_at (declspecs->locations[ds_storage_class],
		  "a storage class can only be specified for objects "
		  "and functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_const))
	error_at (declspecs->locations[ds_const],
		  "%<const%> can only be specified for objects and "
		  "functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_volatile))
	error_at (declspecs->locations[ds_volatile],
		  "%<volatile%> can only be specified for objects and "
		  "functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_restrict))
	error_at (declspecs->locations[ds_restrict],
		  "%<__restrict%> can only be specified for objects and "
		  "functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_thread))
	error_at (declspecs->locations[ds_thread],
		  "%<__thread%> can only be specified for objects "
		  "and functions");
      else if (saw_typedef)
	warning_at (declspecs->locations[ds_typedef], 0,
		    "%<typedef%> was ignored in this declaration");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_constexpr))
	error_at (declspecs->locations[ds_constexpr],
		  "%qs cannot be used for type declarations", "constexpr");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_constinit))
	error_at (declspecs->locations[ds_constinit],
		  "%qs cannot be used for type declarations", "constinit");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_consteval))
	error_at (declspecs->locations[ds_consteval],
		  "%qs cannot be used for type declarations", "consteval");
    }

  if (declspecs->attributes && warn_attributes && declared_type)
    {
      location_t loc;
      if (!CLASS_TYPE_P (declared_type)
	  || !CLASSTYPE_TEMPLATE_INSTANTIATION (declared_type))
	/* For a non-template class, use the name location.  */
	loc = location_of (declared_type);
      else
	/* For a template class (an explicit instantiation), use the
	   current location.  */
	loc = input_location;

      if (explicit_type_instantiation_p)
	/* [dcl.attr.grammar]/4:

	       No attribute-specifier-seq shall appertain to an explicit
	       instantiation.  */
	{
	  if (warning_at (loc, OPT_Wattributes,
			  "attribute ignored in explicit instantiation %q#T",
			  declared_type))
	    inform (loc,
		    "no attribute can be applied to "
		    "an explicit instantiation");
	}
      else
	warn_misplaced_attr_for_class_type (loc, declared_type);
    }

  return declared_type;
}

/* Called when a declaration is seen that contains no names to declare.
   If its type is a reference to a structure, union or enum inherited
   from a containing scope, shadow that tag name for the current scope
   with a forward reference.
   If its type defines a new named structure or union
   or defines an enum, it is valid but we need not do anything here.
   Otherwise, it is an error.
   C++: may have to grok the declspecs to learn about static, complain
   for anonymous unions.

   Returns the TYPE declared -- or NULL_TREE if none.  */

tree
shadow_tag (cp_decl_specifier_seq *declspecs)
{
  tree t = check_tag_decl (declspecs,
			   /*explicit_type_instantiation_p=*/false);

  if (!t)
    return NULL_TREE;

  if (maybe_process_partial_specialization (t) == error_mark_node)
    return NULL_TREE;

  /* This is where the variables in an anonymous union are
     declared.  An anonymous union declaration looks like:
     union { ... } ;
     because there is no declarator after the union, the parser
     sends that declaration here.  */
  if (ANON_AGGR_TYPE_P (t))
    {
      fixup_anonymous_aggr (t);

      if (TYPE_FIELDS (t))
	{
	  tree decl = grokdeclarator (/*declarator=*/NULL,
				      declspecs, NORMAL, 0, NULL);
	  finish_anon_union (decl);
	}
    }

  return t;
}

/* Decode a "typename", such as "int **", returning a ..._TYPE node.  */

tree
groktypename (cp_decl_specifier_seq *type_specifiers,
	      const cp_declarator *declarator,
	      bool is_template_arg)
{
  tree attrs;
  tree type;
  /* Select the declaration context grokdeclarator should use:
     TEMPLATE_TYPE_ARG for a template argument, TYPENAME otherwise.  */
  enum decl_context context
    = is_template_arg ? TEMPLATE_TYPE_ARG : TYPENAME;
  attrs = type_specifiers->attributes;
  type_specifiers->attributes = NULL_TREE;
  type = grokdeclarator (declarator, type_specifiers, context, 0, &attrs);
  if (attrs && type != error_mark_node)
    {
      /* Attributes that could not be applied during grokdeclarator are
	 either diagnosed here or attached to the type now.  */
      if (CLASS_TYPE_P (type))
	warning (OPT_Wattributes, "ignoring attributes applied to class type %qT "
		 "outside of definition", type);
      else if (MAYBE_CLASS_TYPE_P (type))
	/* A template type parameter or other dependent type.  */
	warning (OPT_Wattributes, "ignoring attributes applied to dependent "
		 "type %qT without an associated declaration", type);
      else
	cplus_decl_attributes (&type, attrs, 0);
    }

  return type;
}

/* Process a DECLARATOR for a function-scope variable declaration,
   namespace-scope variable declaration, or function declaration.
   (Function definitions go through start_function; class member
   declarations appearing in the body of the class go through
   grokfield.)
   The DECL corresponding to the DECLARATOR is returned.  If an error
   occurs, the error_mark_node is returned instead.

   DECLSPECS are the decl-specifiers for the declaration.  INITIALIZED is
   SD_INITIALIZED if an explicit initializer is present, or SD_DEFAULTED
   for an explicitly defaulted function, or SD_DELETED for an explicitly
   deleted function, but 0 (SD_UNINITIALIZED) if this is a variable
   implicitly initialized via a default constructor.  ATTRIBUTES and
   PREFIX_ATTRIBUTES are GNU attributes associated with this declaration.

   The scope represented by the context of the returned DECL is pushed
   (if it is not the global namespace) and is assigned to
   *PUSHED_SCOPE_P.  The caller is then responsible for calling
   pop_scope on *PUSHED_SCOPE_P if it is set.  */

tree
start_decl (const cp_declarator *declarator,
	    cp_decl_specifier_seq *declspecs,
	    int initialized,
	    tree attributes,
	    tree prefix_attributes,
	    tree *pushed_scope_p)
{
  tree decl;
  tree context;
  bool was_public;
  int flags;
  bool alias;

  *pushed_scope_p = NULL_TREE;

  /* An object declared as __attribute__((deprecated)) suppresses
     warnings of uses of other deprecated items.  */
  if (lookup_attribute ("deprecated", attributes))
    deprecated_state = DEPRECATED_SUPPRESS;

  attributes = chainon (attributes, prefix_attributes);

  decl = grokdeclarator (declarator, declspecs, NORMAL, initialized,
			 &attributes);

  deprecated_state = DEPRECATED_NORMAL;

  if (decl == NULL_TREE || VOID_TYPE_P (decl)
      || decl == error_mark_node)
    return error_mark_node;

  context = CP_DECL_CONTEXT (decl);
  if (context != global_namespace)
    *pushed_scope_p = push_scope (context);

  /* Is it valid for this decl to have an initializer at all?
     If not, set INITIALIZED to zero, which will indirectly
     tell `cp_finish_decl' to ignore the initializer once it is parsed.  */
  if (initialized
      && TREE_CODE (decl) == TYPE_DECL)
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"typedef %qD is initialized (use %qs instead)",
		decl, "decltype");
      return error_mark_node;
    }

  if (initialized)
    {
      /* An initialized declaration is a definition: clear
	 DECL_EXTERNAL (warning first if `extern' was written).  */
      if (! toplevel_bindings_p ()
	  && DECL_EXTERNAL (decl))
	warning (0, "declaration of %q#D has %<extern%> and is initialized",
		 decl);
      DECL_EXTERNAL (decl) = 0;
      if (toplevel_bindings_p ())
	TREE_STATIC (decl) = 1;
    }

  alias = lookup_attribute ("alias", DECL_ATTRIBUTES (decl)) != 0;

  if (alias && TREE_CODE (decl) == FUNCTION_DECL)
    record_key_method_defined (decl);

  /* If this is a typedef that names the class for linkage purposes
     (7.1.3p8), apply any attributes directly to the type.  */
  if (TREE_CODE (decl) == TYPE_DECL
      && OVERLOAD_TYPE_P (TREE_TYPE (decl))
      && decl == TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (decl))))
    flags = ATTR_FLAG_TYPE_IN_PLACE;
  else
    flags = 0;

  /* Set attributes here so if duplicate decl, will have proper attributes.  */
  cplus_decl_attributes (&decl, attributes, flags);

  /* Dllimported symbols cannot be defined.  Static data members (which
     can be initialized in-class and dllimported) go through grokfield,
     not here, so we don't need to exclude those decls when checking for
     a definition.  */
  if (initialized && DECL_DLLIMPORT_P (decl))
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"definition of %q#D is marked %<dllimport%>", decl);
      DECL_DLLIMPORT_P (decl) = 0;
    }

  /* If #pragma weak was used, mark the decl weak now.  */
  if (!processing_template_decl && !DECL_DECOMPOSITION_P (decl))
    maybe_apply_pragma_weak (decl);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && DECL_UNINLINABLE (decl)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
    warning_at (DECL_SOURCE_LOCATION (decl), 0,
		"inline function %qD given attribute %qs", decl, "noinline");

  /* The context is a complete class type: DECL is an out-of-class
     declaration of a member; match it against the member declared
     inside the class.  */
  if (TYPE_P (context) && COMPLETE_TYPE_P (complete_type (context)))
    {
      bool this_tmpl = (processing_template_decl
			> template_class_depth (context));
      if (VAR_P (decl))
	{
	  tree field = lookup_field (context, DECL_NAME (decl), 0, false);
	  if (field == NULL_TREE
	      || !(VAR_P (field) || variable_template_p (field)))
	    error ("%q+#D is not a static data member of %q#T",
		   decl, context);
	  else if (variable_template_p (field)
		   && (DECL_LANG_SPECIFIC (decl)
		       && DECL_TEMPLATE_SPECIALIZATION (decl)))
	    /* OK, specialization was already checked.  */;
	  else if (variable_template_p (field) && !this_tmpl)
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"non-member-template declaration of %qD", decl);
	      inform (DECL_SOURCE_LOCATION (field),
		      "does not match "
		      "member template declaration here");
	      return error_mark_node;
	    }
	  else
	    {
	      if (variable_template_p (field))
		field = DECL_TEMPLATE_RESULT (field);

	      if (DECL_CONTEXT (field) != context)
		{
		  if (!same_type_p (DECL_CONTEXT (field), context))
		    permerror (input_location, "ISO C++ does not permit %<%T::%D%> "
			       "to be defined as %<%T::%D%>",
			       DECL_CONTEXT (field), DECL_NAME (decl),
			       context, DECL_NAME (decl));
		  DECL_CONTEXT (decl) = DECL_CONTEXT (field);
		}

	      /* Static data member are tricky; an in-class initialization
		 still doesn't provide a definition, so the in-class
		 declaration will have DECL_EXTERNAL set, but will have an
		 initialization.  Thus, duplicate_decls won't warn
		 about this situation, and so we check here.  */
	      if (initialized && DECL_INITIALIZED_IN_CLASS_P (field))
		error ("duplicate initialization of %qD", decl);

	      field = duplicate_decls (decl, field,
				       /*newdecl_is_friend=*/false);
	      if (field == error_mark_node)
		return error_mark_node;
	      else if (field)
		decl = field;
	    }
	}
      else
	{
	  tree field = check_classfn (context, decl,
				      this_tmpl
				      ? current_template_parms
				      : NULL_TREE);
	  if (field && field != error_mark_node
	      && duplicate_decls (decl, field,
				  /*newdecl_is_friend=*/false))
	    decl = field;
	}

      /* cp_finish_decl sets DECL_EXTERNAL if DECL_IN_AGGR_P is set.  */
      DECL_IN_AGGR_P (decl) = 0;
      /* Do not mark DECL as an explicit specialization if it was not
	 already marked as an instantiation; a declaration should
	 never be marked as a specialization unless we know what
	 template is being specialized.  */
      if (DECL_LANG_SPECIFIC (decl) && DECL_USE_TEMPLATE (decl))
	{
	  SET_DECL_TEMPLATE_SPECIALIZATION (decl);
	  if (TREE_CODE (decl) == FUNCTION_DECL)
	    DECL_COMDAT (decl) = (TREE_PUBLIC (decl)
				  && DECL_DECLARED_INLINE_P (decl));
	  else
	    DECL_COMDAT (decl) = false;

	  /* [temp.expl.spec] An explicit specialization of a static data
	     member of a template is a definition if the declaration
	     includes an initializer; otherwise, it is a declaration.

	     We check for processing_specialization so this only applies
	     to the new specialization syntax.  */
	  if (!initialized && processing_specialization)
	    DECL_EXTERNAL (decl) = 1;
	}

      if (DECL_EXTERNAL (decl) && ! DECL_TEMPLATE_SPECIALIZATION (decl)
	  /* Aliases are definitions.  */
	  && !alias)
	permerror (declarator->id_loc,
		   "declaration of %q#D outside of class is not definition",
		   decl);
    }

  was_public = TREE_PUBLIC (decl);

  /* Enter this declaration into the symbol table.  Don't push the plain
     VAR_DECL for a variable template.  */
  if (!template_parm_scope_p ()
      || !VAR_P (decl))
    decl = maybe_push_decl (decl);

  if (processing_template_decl)
    {
      /* Make sure that for a `constinit' decl push_template_decl creates
	 a DECL_TEMPLATE_INFO info for us, so that cp_finish_decl can then
	 set TINFO_VAR_DECLARED_CONSTINIT.  */
      if (decl_spec_seq_has_spec_p (declspecs, ds_constinit))
	retrofit_lang_decl (decl);
      decl = push_template_decl (decl);
    }
  if (decl == error_mark_node)
    return error_mark_node;

  if (VAR_P (decl)
      && DECL_NAMESPACE_SCOPE_P (decl) && !TREE_PUBLIC (decl) && !was_public
      && !DECL_THIS_STATIC (decl) && !DECL_ARTIFICIAL (decl))
    {
      /* This is a const variable with implicit 'static'.  Set
	 DECL_THIS_STATIC so we can tell it from variables that are
	 !TREE_PUBLIC because of the anonymous namespace.  */
      gcc_assert (CP_TYPE_CONST_P (TREE_TYPE (decl)) || errorcount);
      DECL_THIS_STATIC (decl) = 1;
    }

  /* Reject static / thread_local block-scope variables inside a
     constexpr (or consteval) function.  */
  if (current_function_decl && VAR_P (decl)
      && DECL_DECLARED_CONSTEXPR_P (current_function_decl))
    {
      bool ok = false;
      if (CP_DECL_THREAD_LOCAL_P (decl))
	error_at (DECL_SOURCE_LOCATION (decl),
		  "%qD declared %<thread_local%> in %qs function", decl,
		  DECL_IMMEDIATE_FUNCTION_P (current_function_decl)
		  ? "consteval" : "constexpr");
      else if (TREE_STATIC (decl))
	error_at (DECL_SOURCE_LOCATION (decl),
		  "%qD declared %<static%> in %qs function", decl,
		  DECL_IMMEDIATE_FUNCTION_P (current_function_decl)
		  ? "consteval" : "constexpr");
      else
	ok = true;

      if (!ok)
	cp_function_chain->invalid_constexpr = true;
    }

  if (!processing_template_decl && VAR_P (decl))
    start_decl_1 (decl, initialized);

  return decl;
}

/* Process the declaration of a variable DECL.  INITIALIZED is true
   iff DECL is explicitly initialized.  (INITIALIZED is false if the
   variable is initialized via an implicitly-called constructor.)
   This function must be called for ordinary variables (including, for
   example, implicit instantiations of templates), but must not be
   called for template declarations.
*/

void
start_decl_1 (tree decl, bool initialized)
{
  tree type;
  bool complete_p;
  bool aggregate_definition_p;

  gcc_assert (!processing_template_decl);

  if (error_operand_p (decl))
    return;

  gcc_assert (VAR_P (decl));

  type = TREE_TYPE (decl);
  complete_p = COMPLETE_TYPE_P (type);
  aggregate_definition_p = MAYBE_CLASS_TYPE_P (type) && !DECL_EXTERNAL (decl);

  /* If an explicit initializer is present, or if this is a definition
     of an aggregate, then we need a complete type at this point.
     (Scalars are always complete types, so there is nothing to
     check.)  This code just sets COMPLETE_P; errors (if necessary)
     are issued below.  */
  if ((initialized || aggregate_definition_p)
      && !complete_p
      && COMPLETE_TYPE_P (complete_type (type)))
    {
      complete_p = true;
      /* We will not yet have set TREE_READONLY on DECL if the type
	 was "const", but incomplete, before this point.  But, now, we
	 have a complete type, so we can try again.  */
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
    }

  if (is_global_var (decl))
    {
      type_context_kind context
	= (DECL_THREAD_LOCAL_P (decl)
	   ? TCTX_THREAD_STORAGE
	   : TCTX_STATIC_STORAGE);
      verify_type_context (input_location, context, TREE_TYPE (decl));
    }

  if (initialized)
    /* Is it valid for this decl to have an initializer at all?  */
    {
      /* Don't allow initializations for incomplete types except for
	 arrays which might be completed by the initialization.  */
      if (complete_p)
	;			/* A complete type is ok.  */
      else if (type_uses_auto (type))
	;			/* An auto type is ok.  */
      else if (TREE_CODE (type) != ARRAY_TYPE)
	{
	  error ("variable %q#D has initializer but incomplete type", decl);
	  type = TREE_TYPE (decl) = error_mark_node;
	}
      else if (!COMPLETE_TYPE_P (complete_type (TREE_TYPE (type))))
	{
	  if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl))
	    error ("elements of array %q#D have incomplete type", decl);
	  /* else we already gave an error in start_decl.  */
	}
    }
  else if (aggregate_definition_p && !complete_p)
    {
      if (type_uses_auto (type))
	gcc_assert (CLASS_PLACEHOLDER_TEMPLATE (type));
      else
	{
	  error ("aggregate %q#D has incomplete type and cannot be defined",
		 decl);
	  /* Change the type so that assemble_variable will give
	     DECL an rtl we can live with: (mem (const_int 0)).  */
	  type = TREE_TYPE (decl) = error_mark_node;
	}
    }

  /* Create a new scope to hold this declaration if necessary.
     Whether or not a new scope is necessary cannot be determined
     until after the type has been completed; if the type is a
     specialization of a class template it is not until after
     instantiation has occurred that TYPE_HAS_NONTRIVIAL_DESTRUCTOR
     will be set correctly.  */
  maybe_push_cleanup_level (type);
}

/* Handle initialization of references.  DECL, TYPE, and INIT have the
   same meaning as in cp_finish_decl.  *CLEANUP must be NULL on entry,
   but will be set to a new CLEANUP_STMT if a temporary is created
   that must be destroyed subsequently.

   Returns an initializer expression to use to initialize DECL, or
   NULL if the initialization can be performed statically.

   Quotes on semantics can be found in ARM 8.4.3.  */

static tree
grok_reference_init (tree decl, tree type, tree init, int flags)
{
  if (init == NULL_TREE)
    {
      if ((DECL_LANG_SPECIFIC (decl) == 0
	   || DECL_IN_AGGR_P (decl) == 0)
	  && ! DECL_THIS_EXTERN (decl))
	error_at (DECL_SOURCE_LOCATION (decl),
		  "%qD declared as reference but not initialized", decl);
      return NULL_TREE;
    }

  /* TTYPE is the type referred to by the reference TYPE.  */
  tree ttype = TREE_TYPE (type);
  if (TREE_CODE (init) == TREE_LIST)
    {
      /* This handles (C++20 only) code like

	   const A& r(1, 2, 3);

	 where we treat the parenthesized list as a CONSTRUCTOR.  */
      if (TREE_TYPE (init) == NULL_TREE
	  && CP_AGGREGATE_TYPE_P (ttype)
	  && !DECL_DECOMPOSITION_P (decl)
	  && (cxx_dialect >= cxx2a))
	{
	  /* We don't know yet if we should treat const A& r(1) as
	     const A& r{1}.  */
	  if (list_length (init) == 1)
	    {
	      flags |= LOOKUP_AGGREGATE_PAREN_INIT;
	      init = build_x_compound_expr_from_list (init, ELK_INIT,
						      tf_warning_or_error);
	    }
	  /* If the list had more than one element, the code is ill-formed
	     pre-C++20, so we can build a constructor right away.  */
	  else
	    {
	      init = build_constructor_from_list (init_list_type_node, init);
	      CONSTRUCTOR_IS_DIRECT_INIT (init) = true;
	      CONSTRUCTOR_IS_PAREN_INIT (init) = true;
	    }
	}
      else
	init = build_x_compound_expr_from_list (init, ELK_INIT,
						tf_warning_or_error);
    }

  if (TREE_CODE (ttype) != ARRAY_TYPE
      && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE)
    /* Note: default conversion is only called in very special cases.  */
    init = decay_conversion (init, tf_warning_or_error);

  /* check_initializer handles this for non-reference variables, but for
     references we need to do it here or the initializer will get the
     incomplete array type and confuse later calls to
     cp_complete_array_type.  */
  if (TREE_CODE (ttype) == ARRAY_TYPE
      && TYPE_DOMAIN (ttype) == NULL_TREE
      && (BRACE_ENCLOSED_INITIALIZER_P (init)
	  || TREE_CODE (init) == STRING_CST))
    {
      cp_complete_array_type (&ttype, init, false);
      if (ttype != TREE_TYPE (type))
	type = cp_build_reference_type (ttype, TYPE_REF_IS_RVALUE (type));
    }

  /* Convert INIT to the reference type TYPE.  This may involve the
     creation of a temporary, whose lifetime must be the same as that
     of the reference.  If so, a DECL_EXPR for the temporary will be
     added just after the DECL_EXPR for DECL.  That's why we don't set
     DECL_INITIAL for local references (instead assigning to them
     explicitly); we need to allow the temporary to be initialized
     first.  */
  return initialize_reference (type, init, flags, tf_warning_or_error);
}

/* Designated initializers in arrays are not supported in GNU C++.
   The parser cannot detect this error since it does not know whether
   a given brace-enclosed initializer is for a class type or for an
   array.  This function checks that CE does not use a designated
   initializer.  If it does, an error is issued.
   Returns true if CE is valid, i.e., does not have a designated
   initializer.  */

bool
check_array_designated_initializer (constructor_elt *ce,
				    unsigned HOST_WIDE_INT index)
{
  /* Designated initializers for array elements are not supported.  */
  if (ce->index)
    {
      /* The parser only allows identifiers as designated
	 initializers.  */
      if (ce->index == error_mark_node)
	{
	  error ("name used in a GNU-style designated "
		 "initializer for an array");
	  return false;
	}
      else if (identifier_p (ce->index))
	{
	  error ("name %qD used in a GNU-style designated "
		 "initializer for an array", ce->index);
	  return false;
	}

      tree ce_index = build_expr_type_conversion (WANT_INT | WANT_ENUM,
						  ce->index, true);
      if (ce_index
	  && INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (ce_index))
	  && (TREE_CODE (ce_index = fold_non_dependent_expr (ce_index))
	      == INTEGER_CST))
	{
	  /* A C99 designator is OK if it matches the current index.  */
	  if (wi::to_wide (ce_index) == index)
	    {
	      ce->index = ce_index;
	      return true;
	    }
	  else
	    sorry ("non-trivial designated initializers not supported");
	}
      else
	error_at (cp_expr_loc_or_input_loc (ce->index),
		  "C99 designator %qE is not an integral constant-expression",
		  ce->index);

      /* A diagnostic (error or sorry) has been issued above.  */
      return false;
    }

  return true;
}

/* When parsing `int a[] = {1, 2};' we don't know the size of the
   array until we finish parsing the initializer.  If that's the
   situation we're in, update DECL accordingly.  */

static void
maybe_deduce_size_from_array_init (tree decl, tree init)
{
  tree type = TREE_TYPE (decl);

  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE
      && TREE_CODE (decl) != TYPE_DECL)
    {
      /* do_default is really a C-ism to deal with tentative definitions.
	 But let's leave it here to ease the eventual merge.  */
      int do_default = !DECL_EXTERNAL (decl);
      tree initializer = init ? init : DECL_INITIAL (decl);
      int failure = 0;

      /* Check that there are no designated initializers in INIT, as
	 those are not supported in GNU C++, and as the middle-end
	 will crash if presented with a non-numeric designated
	 initializer.  */
      if (initializer && BRACE_ENCLOSED_INITIALIZER_P (initializer))
	{
	  vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (initializer);
	  constructor_elt *ce;
	  HOST_WIDE_INT i;
	  FOR_EACH_VEC_SAFE_ELT (v, i, ce)
	    {
	      if (instantiation_dependent_expression_p (ce->index))
		return;
	      if (!check_array_designated_initializer (ce, i))
		failure = 1;
	    }
	}

      if (failure)
	TREE_TYPE (decl) = error_mark_node;
      else
	{
	  /* cp_complete_array_type returns a nonzero failure code when
	     the size cannot be determined; the cases are diagnosed
	     individually below.  */
	  failure = cp_complete_array_type (&TREE_TYPE (decl), initializer,
					    do_default);
	  if (failure == 1)
	    {
	      error_at (cp_expr_loc_or_loc (initializer,
					    DECL_SOURCE_LOCATION (decl)),
			"initializer fails to determine size of %qD", decl);
	    }
	  else if (failure == 2)
	    {
	      if (do_default)
		{
		  error_at (DECL_SOURCE_LOCATION (decl),
			    "array size missing in %qD", decl);
		}
	      /* If a `static' var's size isn't known, make it extern as
		 well as static, so it does not get allocated.  If it's not
		 `static', then don't mark it extern; finish_incomplete_decl
		 will give it a default size and it will get allocated.  */
	      else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl))
		DECL_EXTERNAL (decl) = 1;
	    }
	  else if (failure == 3)
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"zero-size array %qD", decl);
	    }
	}

      cp_apply_type_quals_to_decl (cp_type_quals (TREE_TYPE (decl)), decl);

      relayout_decl (decl);
    }
}

/* Set DECL_SIZE, DECL_ALIGN, etc. for DECL (a VAR_DECL), and issue
   any appropriate error messages regarding the layout.  */

static void
layout_var_decl (tree decl)
{
  tree type;

  type = TREE_TYPE (decl);
  if (type == error_mark_node)
    return;

  /* If we haven't already laid out this declaration, do so now.
     Note that we must not call complete type for an external object
     because it's type might involve templates that we are not
     supposed to instantiate yet.
     (And it's perfectly valid to say `extern X x' for some incomplete
     type `X'.)  */
  if (!DECL_EXTERNAL (decl))
    complete_type (type);
  if (!DECL_SIZE (decl)
      && TREE_TYPE (decl) != error_mark_node
      && complete_or_array_type_p (type))
    layout_decl (decl, 0);

  if (!DECL_EXTERNAL (decl) && DECL_SIZE (decl) == NULL_TREE)
    {
      /* An automatic variable with an incomplete type: that is an error.
	 Don't talk about array types here, since we took care of that
	 message in grokdeclarator.  */
      error_at (DECL_SOURCE_LOCATION (decl),
		"storage size of %qD isn%'t known", decl);
      TREE_TYPE (decl) = error_mark_node;
    }
#if 0
  /* Keep this code around in case we later want to control debug info
     based on whether a type is "used".  (jason 1999-11-11) */

  else if (!DECL_EXTERNAL (decl) && MAYBE_CLASS_TYPE_P (ttype))
    /* Let debugger know it should output info for this type.  */
    note_debug_info_needed (ttype);

  if (TREE_STATIC (decl) && DECL_CLASS_SCOPE_P (decl))
    note_debug_info_needed (DECL_CONTEXT (decl));
#endif

  if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl))
      && DECL_SIZE (decl) != NULL_TREE
      && ! TREE_CONSTANT (DECL_SIZE (decl)))
    {
      if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
	constant_expression_warning (DECL_SIZE (decl));
      else
	{
	  error_at (DECL_SOURCE_LOCATION (decl),
		    "storage size of %qD isn%'t constant", decl);
	  TREE_TYPE (decl) = error_mark_node;
	}
    }
}

/* If a local static variable is declared in an inline function, or if
   we have a weak definition, we must endeavor to create only one
   instance of the variable at link-time.  */

void
maybe_commonize_var (tree decl)
{
  /* Don't mess with __FUNCTION__ and similar.  */
  if (DECL_ARTIFICIAL (decl))
    return;

  /* Static data in a function with comdat linkage also has comdat
     linkage.  */
  if ((TREE_STATIC (decl)
       && DECL_FUNCTION_SCOPE_P (decl)
       && vague_linkage_p (DECL_CONTEXT (decl)))
      || (TREE_PUBLIC (decl) && DECL_INLINE_VAR_P (decl)))
    {
      if (flag_weak)
	{
	  /* With weak symbols, we simply make the variable COMDAT;
	     that will cause copies in multiple translations units to
	     be merged.  */
	  comdat_linkage (decl);
	}
      else
	{
	  if (DECL_INITIAL (decl) == NULL_TREE
	      || DECL_INITIAL (decl) == error_mark_node)
	    {
	      /* Without weak symbols, we can use COMMON to merge
		 uninitialized variables.  */
	      TREE_PUBLIC (decl) = 1;
	      DECL_COMMON (decl) = 1;
	    }
	  else
	    {
	      /* While for initialized variables, we must use internal
		 linkage -- which means that multiple copies will not
		 be merged.  */
	      TREE_PUBLIC (decl) = 0;
	      DECL_COMMON (decl) = 0;
	      DECL_INTERFACE_KNOWN (decl) = 1;
	      const char *msg;
	      if (DECL_INLINE_VAR_P (decl))
		msg = G_("sorry: semantics of inline variable "
			 "%q#D are wrong (you%'ll wind up with "
			 "multiple copies)");
	      else
		msg = G_("sorry: semantics of inline function "
			 "static data %q#D are wrong (you%'ll wind "
			 "up with multiple copies)");
	      if (warning_at (DECL_SOURCE_LOCATION (decl), 0,
			      msg, decl))
		inform (DECL_SOURCE_LOCATION (decl),
			"you can work around this by removing the initializer");
	    }
	}
    }
}

/* Issue an error message if DECL is an uninitialized const variable.
   CONSTEXPR_CONTEXT_P is true when the function is called in a constexpr
   context from potential_constant_expression.  Returns true if all is well,
   false otherwise.  */

bool
check_for_uninitialized_const_var (tree decl, bool constexpr_context_p,
				   tsubst_flags_t complain)
{
  tree type = strip_array_types (TREE_TYPE (decl));

  /* ``Unless explicitly declared extern, a const object does not have
     external linkage and must be initialized. ($8.4; $12.1)'' ARM
     7.1.6 */
  if (VAR_P (decl)
      && !TYPE_REF_P (type)
      && (CP_TYPE_CONST_P (type)
	  /* C++20 permits trivial default initialization in constexpr
	     context (P1331R2).  */
	  || (cxx_dialect < cxx2a
	      && (constexpr_context_p
		  || var_in_constexpr_fn (decl))))
      && !DECL_NONTRIVIALLY_INITIALIZED_P (decl))
    {
      /* FIELD is a subobject the implicit default constructor would
	 leave uninitialized, or NULL_TREE if there is none.  */
      tree field = default_init_uninitialized_part (type);
      if (!field)
	return true;

      bool show_notes = true;

      if (!constexpr_context_p || cxx_dialect >= cxx2a)
	{
	  if (CP_TYPE_CONST_P (type))
	    {
	      if (complain & tf_error)
		show_notes = permerror (DECL_SOURCE_LOCATION (decl),
					"uninitialized %<const %D%>", decl);
	    }
	  else
	    {
	      if (!is_instantiation_of_constexpr (current_function_decl)
		  && (complain & tf_error))
		error_at (DECL_SOURCE_LOCATION (decl),
			  "uninitialized variable %qD in %<constexpr%> "
			  "function", decl);
	      else
		show_notes = false;
	      cp_function_chain->invalid_constexpr = true;
	    }
	}
      else if (complain & tf_error)
	error_at (DECL_SOURCE_LOCATION (decl),
		  "uninitialized variable %qD in %<constexpr%> context",
		  decl);

      if (show_notes && CLASS_TYPE_P (type) && (complain & tf_error))
	{
	  tree defaulted_ctor;

	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
		  "%q#T has no user-provided default constructor", type);
	  defaulted_ctor = in_class_defaulted_default_constructor (type);
	  if (defaulted_ctor)
	    inform (DECL_SOURCE_LOCATION (defaulted_ctor),
		    "constructor is not user-provided because it is "
		    "explicitly defaulted in the class body");
	  inform (DECL_SOURCE_LOCATION (field),
		  "and the implicitly-defined constructor does not "
		  "initialize %q#D", field);
	}

      return false;
    }

  return true;
}

/* Structure holding the current initializer being processed by reshape_init.
   CUR is a pointer to the current element being processed, END is a pointer
   after the last element present in the initializer.  */

struct reshape_iter
{
  constructor_elt *cur;
  constructor_elt *end;
};

static tree reshape_init_r (tree, reshape_iter *, tree, tsubst_flags_t);

/* FIELD is a FIELD_DECL or NULL.  In the former case, the value
   returned is the next FIELD_DECL (possibly FIELD itself) that can be
   initialized.  If there are no more such fields, the return value
   will be NULL.
*/

tree
next_initializable_field (tree field)
{
  while (field
	 && (TREE_CODE (field) != FIELD_DECL
	     || DECL_UNNAMED_BIT_FIELD (field)
	     || (DECL_ARTIFICIAL (field)
		 /* In C++17, don't skip base class fields.  */
		 && !(cxx_dialect >= cxx17 && DECL_FIELD_IS_BASE (field))
		 /* Don't skip vptr fields.  We might see them when we're
		    called from reduced_constant_expression_p.  */
		 && !DECL_VIRTUAL_P (field))))
    field = DECL_CHAIN (field);

  return field;
}

/* Return true for [dcl.init.list] direct-list-initialization from
   single element of enumeration with a fixed underlying type.  */

bool
is_direct_enum_init (tree type, tree init)
{
  if (cxx_dialect >= cxx17
      && TREE_CODE (type) == ENUMERAL_TYPE
      && ENUM_FIXED_UNDERLYING_TYPE_P (type)
      && TREE_CODE (init) == CONSTRUCTOR
      && CONSTRUCTOR_IS_DIRECT_INIT (init)
      && CONSTRUCTOR_NELTS (init) == 1)
    return true;
  return false;
}

/* Subroutine of reshape_init_array and reshape_init_vector, which does
   the actual work.  ELT_TYPE is the element type of the array.
   MAX_INDEX is an INTEGER_CST representing the size of the array minus
   one (the maximum index), or NULL_TREE if the array was declared
   without specifying the size.  D is the iterator within the
   constructor.  */

static tree
reshape_init_array_1 (tree elt_type, tree max_index, reshape_iter *d,
		      tree first_initializer_p, tsubst_flags_t complain)
{
  tree new_init;
  bool sized_array_p = (max_index && TREE_CONSTANT (max_index));
  unsigned HOST_WIDE_INT max_index_cst = 0;
  unsigned HOST_WIDE_INT index;

  /* The initializer for an array is always a CONSTRUCTOR.  If this is the
     outermost CONSTRUCTOR and the element type is non-aggregate, we don't need
     to build a new one.  But don't reuse if not complaining; if this is
     tentative, we might also reshape to another type (95319).  */
  bool reuse = (first_initializer_p
		&& (complain & tf_error)
		&& !CP_AGGREGATE_TYPE_P (elt_type)
		&& !TREE_SIDE_EFFECTS (first_initializer_p));
  if (reuse)
    new_init = first_initializer_p;
  else
    new_init = build_constructor (init_list_type_node, NULL);

  if (sized_array_p)
    {
      /* Minus 1 is used for zero sized arrays.  */
      if (integer_all_onesp (max_index))
	return new_init;

      if (tree_fits_uhwi_p (max_index))
	max_index_cst = tree_to_uhwi (max_index);
      /* sizetype is sign extended, not zero extended.  */
      else
	max_index_cst = tree_to_uhwi (fold_convert (size_type_node,
						    max_index));
    }

  /* Loop until there are no more initializers.  Note that reshape_init_r
     advances D past however many elements each element initializer
     consumes.  */
  for (index = 0;
       d->cur != d->end && (!sized_array_p || index <= max_index_cst);
       ++index)
    {
      tree elt_init;
      constructor_elt *old_cur = d->cur;

      check_array_designated_initializer (d->cur, index);
      elt_init = reshape_init_r (elt_type, d,
				 /*first_initializer_p=*/NULL_TREE,
				 complain);
      if (elt_init == error_mark_node)
	return error_mark_node;
      tree idx = size_int (index);
      if (reuse)
	{
	  old_cur->index = idx;
	  old_cur->value = elt_init;
	}
      else
	CONSTRUCTOR_APPEND_ELT (CONSTRUCTOR_ELTS (new_init),
				idx, elt_init);
      if (!TREE_CONSTANT (elt_init))
	TREE_CONSTANT (new_init) = false;

      /* This can happen with an invalid initializer (c++/54501).  */
      if (d->cur == old_cur && !sized_array_p)
	break;
    }

  return new_init;
}

/* Subroutine of reshape_init_r, processes the initializers for arrays.
   Parameters are the same of reshape_init_r.  */

static tree
reshape_init_array (tree type, reshape_iter *d,
		    tree first_initializer_p, tsubst_flags_t complain)
{
  tree max_index = NULL_TREE;

  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);

  if (TYPE_DOMAIN (type))
    max_index = array_type_nelts (type);

  return reshape_init_array_1 (TREE_TYPE (type), max_index, d,
			       first_initializer_p, complain);
}

/* Subroutine of reshape_init_r, processes the initializers for vectors.
   Parameters are the same of reshape_init_r.
*/

static tree
reshape_init_vector (tree type, reshape_iter *d, tsubst_flags_t complain)
{
  tree max_index = NULL_TREE;

  gcc_assert (VECTOR_TYPE_P (type));

  /* A compound literal of the vector's own type is taken as-is; any other
     compound-literal type is an error.  */
  if (COMPOUND_LITERAL_P (d->cur->value))
    {
      tree value = d->cur->value;
      if (!same_type_p (TREE_TYPE (value), type))
	{
	  if (complain & tf_error)
	    error ("invalid type %qT as initializer for a vector of type %qT",
		   TREE_TYPE (d->cur->value), type);
	  value = error_mark_node;
	}
      ++d->cur;
      return value;
    }

  /* For a vector, we initialize it as an array of the appropriate size.  */
  if (VECTOR_TYPE_P (type))
    max_index = size_int (TYPE_VECTOR_SUBPARTS (type) - 1);

  return reshape_init_array_1 (TREE_TYPE (type), max_index, d,
			       NULL_TREE, complain);
}

/* Subroutine of reshape_init_r, processes the initializers for classes
   or union.  Parameters are the same of reshape_init_r.  */

static tree
reshape_init_class (tree type, reshape_iter *d, bool first_initializer_p,
		    tsubst_flags_t complain)
{
  tree field;
  tree new_init;

  gcc_assert (CLASS_TYPE_P (type));

  /* The initializer for a class is always a CONSTRUCTOR.  */
  new_init = build_constructor (init_list_type_node, NULL);
  field = next_initializable_field (TYPE_FIELDS (type));

  if (!field)
    {
      /* [dcl.init.aggr]

	 An initializer for an aggregate member that is an
	 empty class shall have the form of an empty
	 initializer-list {}.  */
      if (!first_initializer_p)
	{
	  if (complain & tf_error)
	    error ("initializer for %qT must be brace-enclosed", type);
	  return error_mark_node;
	}
      return new_init;
    }

  /* Loop through the initializable fields, gathering initializers.  */
  while (d->cur != d->end)
    {
      tree field_init;
      constructor_elt *old_cur = d->cur;

      /* Handle designated initializers, as an extension.  */
      if (d->cur->index)
	{
	  if (d->cur->index == error_mark_node)
	    return error_mark_node;

	  if (TREE_CODE (d->cur->index) == FIELD_DECL)
	    {
	      /* We already reshaped this.
   */
	      if (field != d->cur->index)
		{
		  tree id = DECL_NAME (d->cur->index);
		  gcc_assert (id);
		  gcc_checking_assert (d->cur->index
				       == get_class_binding (type, id));
		  field = d->cur->index;
		}
	    }
	  else if (TREE_CODE (d->cur->index) == IDENTIFIER_NODE)
	    /* A `.name =' designator: look the member up by name.  */
	    field = get_class_binding (type, d->cur->index);
	  else
	    {
	      if (complain & tf_error)
		error ("%<[%E] =%> used in a GNU-style designated initializer"
		       " for class %qT", d->cur->index, type);
	      return error_mark_node;
	    }

	  if (!field || TREE_CODE (field) != FIELD_DECL)
	    {
	      if (complain & tf_error)
		error ("%qT has no non-static data member named %qD", type,
		       d->cur->index);
	      return error_mark_node;
	    }
	}

      /* If we processed all the member of the class, we are done.  */
      if (!field)
	break;

      field_init = reshape_init_r (TREE_TYPE (field), d,
				   /*first_initializer_p=*/NULL_TREE,
				   complain);
      if (field_init == error_mark_node)
	return error_mark_node;

      if (d->cur == old_cur && d->cur->index)
	{
	  /* This can happen with an invalid initializer for a flexible
	     array member (c++/54441).  */
	  if (complain & tf_error)
	    error ("invalid initializer for %q#D", field);
	  return error_mark_node;
	}

      CONSTRUCTOR_APPEND_ELT (CONSTRUCTOR_ELTS (new_init), field, field_init);

      /* [dcl.init.aggr]

	 When a union  is  initialized with a brace-enclosed
	 initializer, the braces shall only contain an
	 initializer for the first member of the union.  */
      if (TREE_CODE (type) == UNION_TYPE)
	break;

      field = next_initializable_field (DECL_CHAIN (field));
    }

  return new_init;
}

/* Subroutine of reshape_init_r.  We're in a context where C99 initializer
   designators are not valid; either complain or return true to indicate
   that reshape_init_r should return error_mark_node.
*/

static bool
has_designator_problem (reshape_iter *d, tsubst_flags_t complain)
{
  if (d->cur->index)
    {
      if (complain & tf_error)
	error_at (cp_expr_loc_or_input_loc (d->cur->index),
		  "C99 designator %qE outside aggregate initializer",
		  d->cur->index);
      else
	return true;
    }
  return false;
}

/* Subroutine of reshape_init, which processes a single initializer (part of
   a CONSTRUCTOR).  TYPE is the type of the variable being initialized, D is
   the iterator within the CONSTRUCTOR which points to the initializer to
   process.  If this is the first initializer of the outermost CONSTRUCTOR
   node, FIRST_INITIALIZER_P is that CONSTRUCTOR; otherwise, it is
   NULL_TREE.  */

static tree
reshape_init_r (tree type, reshape_iter *d, tree first_initializer_p,
		tsubst_flags_t complain)
{
  tree init = d->cur->value;

  if (error_operand_p (init))
    return error_mark_node;

  if (first_initializer_p && !CP_AGGREGATE_TYPE_P (type)
      && has_designator_problem (d, complain))
    return error_mark_node;

  tree stripped_init = tree_strip_any_location_wrapper (init);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      /* A complex type can be initialized from one or two initializers,
	 but braces are not elided.  */
      d->cur++;
      if (BRACE_ENCLOSED_INITIALIZER_P (stripped_init))
	{
	  if (CONSTRUCTOR_NELTS (stripped_init) > 2)
	    {
	      if (complain & tf_error)
		error ("too many initializers for %qT", type);
	      else
		return error_mark_node;
	    }
	}
      else if (first_initializer_p && d->cur != d->end)
	{
	  /* Collect the real and imaginary initializers into a new
	     CONSTRUCTOR.  */
	  vec<constructor_elt, va_gc> *v = 0;
	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init);
	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, d->cur->value);
	  if (has_designator_problem (d, complain))
	    return error_mark_node;
	  d->cur++;
	  init = build_constructor (init_list_type_node, v);
	}
      return init;
    }

  /* A non-aggregate type is always initialized with a single
     initializer.  */
  if (!CP_AGGREGATE_TYPE_P (type))
    {
      /* It is invalid to initialize a non-aggregate type with a
	 brace-enclosed initializer before C++0x.
	 We need to check for BRACE_ENCLOSED_INITIALIZER_P here because
	 of g++.old-deja/g++.mike/p7626.C: a pointer-to-member constant is
	 a CONSTRUCTOR (with a record type).  */
      if (TREE_CODE (stripped_init) == CONSTRUCTOR
	  /* Don't complain about a capture-init.  */
	  && !CONSTRUCTOR_IS_DIRECT_INIT (stripped_init)
	  && BRACE_ENCLOSED_INITIALIZER_P (stripped_init))  /* p7626.C */
	{
	  if (SCALAR_TYPE_P (type))
	    {
	      if (cxx_dialect < cxx11)
		{
		  if (complain & tf_error)
		    error ("braces around scalar initializer for type %qT",
			   type);
		  init = error_mark_node;
		}
	      else if (first_initializer_p
		       || (CONSTRUCTOR_NELTS (stripped_init) > 0
			   && (BRACE_ENCLOSED_INITIALIZER_P
			       (CONSTRUCTOR_ELT (stripped_init,0)->value))))
		{
		  if (complain & tf_error)
		    error ("too many braces around scalar initializer "
			   "for type %qT", type);
		  init = error_mark_node;
		}
	    }
	  else
	    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	}

      d->cur++;
      return init;
    }

  /* "If T is a class type and the initializer list has a single element of
     type cv U, where U is T or a class derived from T, the object is
     initialized from that element."  Even if T is an aggregate.  */
  if (cxx_dialect >= cxx11 && (CLASS_TYPE_P (type) || VECTOR_TYPE_P (type))
      && first_initializer_p
      && d->end - d->cur == 1
      && reference_related_p (type, TREE_TYPE (init)))
    {
      d->cur++;
      return init;
    }

  /* [dcl.init.aggr]

     All implicit type conversions (clause _conv_) are considered when
     initializing the aggregate member with an initializer from an
     initializer-list.  If the initializer can initialize a member,
     the member is initialized.  Otherwise, if the member is itself a
     non-empty subaggregate, brace elision is assumed and the
     initializer is considered for the initialization of the first
     member of the subaggregate.  */
  if ((TREE_CODE (init) != CONSTRUCTOR || COMPOUND_LITERAL_P (init))
      /* But don't try this for the first initializer, since that would be
	 looking through the outermost braces; A a2 = { a1 }; is not a
	 valid aggregate initialization.
   */
      && !first_initializer_p
      && (same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (init))
	  || can_convert_arg (type, TREE_TYPE (init), init, LOOKUP_NORMAL,
			      complain)))
    {
      d->cur++;
      return init;
    }

  /* [dcl.init.string]

     A char array (whether plain char, signed char, or unsigned char)
     can be initialized by a string-literal (optionally enclosed in
     braces); a wchar_t array can be initialized by a wide
     string-literal (optionally enclosed in braces).  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (type))))
    {
      tree str_init = init;
      tree stripped_str_init = stripped_init;

      /* Strip one level of braces if and only if they enclose a single
	 element (as allowed by [dcl.init.string]).  */
      if (!first_initializer_p
	  && TREE_CODE (stripped_str_init) == CONSTRUCTOR
	  && CONSTRUCTOR_NELTS (stripped_str_init) == 1)
	{
	  str_init = (*CONSTRUCTOR_ELTS (stripped_str_init))[0].value;
	  stripped_str_init = tree_strip_any_location_wrapper (str_init);
	}

      /* If it's a string literal, then it's the initializer for the array
	 as a whole.  Otherwise, continue with normal initialization for
	 array types (one value per array element).  */
      if (TREE_CODE (stripped_str_init) == STRING_CST)
	{
	  if (has_designator_problem (d, complain))
	    return error_mark_node;
	  d->cur++;
	  return str_init;
	}
    }

  /* The following cases are about aggregates.  If we are not within a full
     initializer already, and there is not a CONSTRUCTOR, it means that there
     is a missing set of braces (that is, we are processing the case for
     which reshape_init exists).  */
  if (!first_initializer_p)
    {
      if (TREE_CODE (stripped_init) == CONSTRUCTOR)
	{
	  tree init_type = TREE_TYPE (init);
	  if (init_type && TYPE_PTRMEMFUNC_P (init_type))
	    /* There is no need to call reshape_init for pointer-to-member
	       function initializers, as they are always constructed correctly
	       by the front end.  Here we have e.g. {.__pfn=0B, .__delta=0},
	       which is missing outermost braces.
   We should warn below, and
	       one of the routines below will wrap it in additional { }.  */;
	  /* For a nested compound literal, proceed to specialized routines,
	     to handle initialization of arrays and similar.  */
	  else if (COMPOUND_LITERAL_P (stripped_init))
	    gcc_assert (!BRACE_ENCLOSED_INITIALIZER_P (stripped_init));
	  /* A CONSTRUCTOR of the target's type is a previously
	     digested initializer.  */
	  else if (same_type_ignoring_top_level_qualifiers_p (type, init_type))
	    {
	      ++d->cur;
	      return init;
	    }
	  else
	    {
	      /* Something that hasn't been reshaped yet.  */
	      ++d->cur;
	      gcc_assert (BRACE_ENCLOSED_INITIALIZER_P (stripped_init));
	      return reshape_init (type, init, complain);
	    }
	}

      if (complain & tf_warning)
	warning (OPT_Wmissing_braces,
		 "missing braces around initializer for %qT", type);
    }

  /* Dispatch to specialized routines.  */
  if (CLASS_TYPE_P (type))
    return reshape_init_class (type, d, first_initializer_p, complain);
  else if (TREE_CODE (type) == ARRAY_TYPE)
    return reshape_init_array (type, d, first_initializer_p, complain);
  else if (VECTOR_TYPE_P (type))
    return reshape_init_vector (type, d, complain);
  else
    gcc_unreachable();
}

/* Undo the brace-elision allowed by [dcl.init.aggr] in a
   brace-enclosed aggregate initializer.

   INIT is the CONSTRUCTOR containing the list of initializers describing
   a brace-enclosed initializer for an entity of the indicated aggregate TYPE.
   It may not presently match the shape of the TYPE; for example:

     struct S { int a; int b; };
     struct S a[] = { 1, 2, 3, 4 };

   Here INIT will hold a vector of four elements, rather than a
   vector of two elements, each itself a vector of two elements.  This
   routine transforms INIT from the former form into the latter.  The
   revised CONSTRUCTOR node is returned.
*/

tree
reshape_init (tree type, tree init, tsubst_flags_t complain)
{
  vec<constructor_elt, va_gc> *v;
  reshape_iter d;
  tree new_init;

  gcc_assert (BRACE_ENCLOSED_INITIALIZER_P (init));

  v = CONSTRUCTOR_ELTS (init);

  /* An empty constructor does not need reshaping, and it is always a valid
     initializer.  */
  if (vec_safe_is_empty (v))
    return init;

  /* Brace elision is not performed for a CONSTRUCTOR representing
     parenthesized aggregate initialization.  */
  if (CONSTRUCTOR_IS_PAREN_INIT (init))
    return init;

  /* Handle [dcl.init.list] direct-list-initialization from
     single element of enumeration with a fixed underlying type.  */
  if (is_direct_enum_init (type, init))
    {
      tree elt = CONSTRUCTOR_ELT (init, 0)->value;
      type = cv_unqualified (type);
      if (check_narrowing (ENUM_UNDERLYING_TYPE (type), elt, complain))
	{
	  warning_sentinel w (warn_useless_cast);
	  warning_sentinel w2 (warn_ignored_qualifiers);
	  return cp_build_c_cast (input_location, type, elt,
				  tf_warning_or_error);
	}
      else
	return error_mark_node;
    }

  /* Recurse on this CONSTRUCTOR.  */
  d.cur = &(*v)[0];
  d.end = d.cur + v->length ();

  new_init = reshape_init_r (type, &d, init, complain);
  if (new_init == error_mark_node)
    return error_mark_node;

  /* Make sure all the element of the constructor were used.  Otherwise,
     issue an error about exceeding initializers.  */
  if (d.cur != d.end)
    {
      if (complain & tf_error)
	error ("too many initializers for %qT", type);
      return error_mark_node;
    }

  /* Propagate direct-init / designated-init flags to the reshaped
     CONSTRUCTOR.  */
  if (CONSTRUCTOR_IS_DIRECT_INIT (init)
      && BRACE_ENCLOSED_INITIALIZER_P (new_init))
    CONSTRUCTOR_IS_DIRECT_INIT (new_init) = true;
  if (CONSTRUCTOR_IS_DESIGNATED_INIT (init)
      && BRACE_ENCLOSED_INITIALIZER_P (new_init))
    CONSTRUCTOR_IS_DESIGNATED_INIT (new_init) = true;

  return new_init;
}

/* Verify array initializer.  Returns true if errors have been reported.  */

bool
check_array_initializer (tree decl, tree type, tree init)
{
  tree element_type = TREE_TYPE (type);

  /* Structured binding when initialized with an array type needs
     to have complete type.
   */
  if (decl
      && DECL_DECOMPOSITION_P (decl)
      && !DECL_DECOMP_BASE (decl)
      && !COMPLETE_TYPE_P (type))
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"structured binding has incomplete type %qT", type);
      TREE_TYPE (decl) = error_mark_node;
      return true;
    }

  /* The array type itself need not be complete, because the
     initializer may tell us how many elements are in the array.
     But, the elements of the array must be complete.  */
  if (!COMPLETE_TYPE_P (complete_type (element_type)))
    {
      if (decl)
	error_at (DECL_SOURCE_LOCATION (decl),
		  "elements of array %q#D have incomplete type", decl);
      else
	error ("elements of array %q#T have incomplete type", type);
      return true;
    }

  location_t loc = (decl ? location_of (decl) : input_location);
  if (!verify_type_context (loc, TCTX_ARRAY_ELEMENT, element_type))
    return true;

  /* A compound literal can't have variable size.  */
  if (init && !decl
      && ((COMPLETE_TYPE_P (type) && !TREE_CONSTANT (TYPE_SIZE (type)))
	  || !TREE_CONSTANT (TYPE_SIZE (element_type))))
    {
      error ("variable-sized compound literal");
      return true;
    }
  return false;
}

/* Subroutine of check_initializer; args are passed down from that function.
   Set stmts_are_full_exprs_p to 1 across a call to build_aggr_init.  */

static tree
build_aggr_init_full_exprs (tree decl, tree init, int flags)
{
  gcc_assert (stmts_are_full_exprs_p ());
  return build_aggr_init (decl, init, flags, tf_warning_or_error);
}

/* Verify INIT (the initializer for DECL), and record the
   initialization in DECL_INITIAL, if appropriate.  CLEANUP is as for
   grok_reference_init.

   If the return value is non-NULL, it is an expression that must be
   evaluated dynamically to initialize DECL.  */

static tree
check_initializer (tree decl, tree init, int flags, vec<tree, va_gc> **cleanups)
{
  tree type;
  tree init_code = NULL;
  tree core_type;

  /* Things that are going to be initialized need to have complete
     type.
*/
  TREE_TYPE (decl) = type = complete_type (TREE_TYPE (decl));

  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      /* A variable with DECL_HAS_VALUE_EXPR_P set is just a placeholder,
	 it doesn't have storage to be initialized.  */
      gcc_assert (init == NULL_TREE);
      return NULL_TREE;
    }

  if (type == error_mark_node)
    /* We will have already complained.  */
    return NULL_TREE;

  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (check_array_initializer (decl, type, init))
	return NULL_TREE;
    }
  else if (!COMPLETE_TYPE_P (type))
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"%q#D has incomplete type", decl);
      TREE_TYPE (decl) = error_mark_node;
      return NULL_TREE;
    }
  else
    /* There is no way to make a variable-sized class type in GNU C++.  */
    gcc_assert (TREE_CONSTANT (TYPE_SIZE (type)));

  if (init && BRACE_ENCLOSED_INITIALIZER_P (init))
    {
      /* init_len counts the top-level elements of the braced list.  */
      int init_len = CONSTRUCTOR_NELTS (init);
      if (SCALAR_TYPE_P (type))
	{
	  if (init_len == 0)
	    {
	      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	      init = build_zero_init (type, NULL_TREE, false);
	    }
	  else if (init_len != 1 && TREE_CODE (type) != COMPLEX_TYPE)
	    {
	      error_at (cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (decl)),
			"scalar object %qD requires one element in "
			"initializer", decl);
	      TREE_TYPE (decl) = error_mark_node;
	      return NULL_TREE;
	    }
	}
    }

  if (TREE_CODE (decl) == CONST_DECL)
    {
      gcc_assert (!TYPE_REF_P (type));

      DECL_INITIAL (decl) = init;

      gcc_assert (init != NULL_TREE);
      init = NULL_TREE;
    }
  else if (!init && DECL_REALLY_EXTERN (decl))
    ;
  else if (init || type_build_ctor_call (type)
	   || TYPE_REF_P (type))
    {
      if (TYPE_REF_P (type))
	{
	  init = grok_reference_init (decl, type, init, flags);
	  flags |= LOOKUP_ALREADY_DIGESTED;
	}
      else if (!init)
	check_for_uninitialized_const_var (decl, /*constexpr_context_p=*/false,
					   tf_warning_or_error);
      /* Do not reshape constructors of vectors (they don't need to be
	 reshaped.
   */
      else if (BRACE_ENCLOSED_INITIALIZER_P (init))
	{
	  if (is_std_init_list (type))
	    {
	      init = perform_implicit_conversion (type, init,
						  tf_warning_or_error);
	      flags |= LOOKUP_ALREADY_DIGESTED;
	    }
	  else if (TYPE_NON_AGGREGATE_CLASS (type))
	    {
	      /* Don't reshape if the class has constructors.  */
	      if (cxx_dialect == cxx98)
		error_at (cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (decl)),
			  "in C++98 %qD must be initialized by "
			  "constructor, not by %<{...}%>",
			  decl);
	    }
	  else if (VECTOR_TYPE_P (type) && TYPE_VECTOR_OPAQUE (type))
	    {
	      error ("opaque vector types cannot be initialized");
	      init = error_mark_node;
	    }
	  else
	    {
	      init = reshape_init (type, init, tf_warning_or_error);
	      flags |= LOOKUP_NO_NARROWING;
	    }
	}
      /* [dcl.init] "Otherwise, if the destination type is an array, the object
	 is initialized as follows..."  So handle things like

	  int a[](1, 2, 3);

	 which is permitted in C++20 by P0960.  */
      else if (TREE_CODE (init) == TREE_LIST
	       && TREE_TYPE (init) == NULL_TREE
	       && TREE_CODE (type) == ARRAY_TYPE
	       && !DECL_DECOMPOSITION_P (decl)
	       && (cxx_dialect >= cxx2a))
	{
	  /* [dcl.init.string] "An array of ordinary character type [...]
	     can be initialized by an ordinary string literal [...] by an
	     appropriately-typed string literal enclosed in braces" only
	     talks about braces, but GCC has always accepted

	       char a[]("foobar");

	     so we continue to do so.  */
	  tree val = TREE_VALUE (init);
	  if (TREE_CHAIN (init) == NULL_TREE
	      && char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (type)))
	      && TREE_CODE (tree_strip_any_location_wrapper (val))
		 == STRING_CST)
	    /* If the list has a single element and it's a string literal,
	       then it's the initializer for the array as a whole.
   */
	    init = val;
	  else
	    {
	      /* Wrap the parenthesized list in a CONSTRUCTOR marked as
		 paren-init so brace elision is not applied later.  */
	      init = build_constructor_from_list (init_list_type_node, init);
	      CONSTRUCTOR_IS_DIRECT_INIT (init) = true;
	      CONSTRUCTOR_IS_PAREN_INIT (init) = true;
	    }
	}
      else if (TREE_CODE (init) == TREE_LIST
	       && TREE_TYPE (init) != unknown_type_node
	       && !MAYBE_CLASS_TYPE_P (type))
	{
	  gcc_assert (TREE_CODE (decl) != RESULT_DECL);

	  /* We get here with code like `int a (2);' */
	  init = build_x_compound_expr_from_list (init, ELK_INIT,
						  tf_warning_or_error);
	}

      /* If DECL has an array type without a specific bound, deduce the
	 array size from the initializer.  */
      maybe_deduce_size_from_array_init (decl, init);
      type = TREE_TYPE (decl);
      if (type == error_mark_node)
	return NULL_TREE;

      if (((type_build_ctor_call (type) || CLASS_TYPE_P (type))
	   && !(flags & LOOKUP_ALREADY_DIGESTED)
	   && !(init && BRACE_ENCLOSED_INITIALIZER_P (init)
		&& CP_AGGREGATE_TYPE_P (type)
		&& (CLASS_TYPE_P (type)
		    || !TYPE_NEEDS_CONSTRUCTING (type)
		    || type_has_extended_temps (type))))
	  || (DECL_DECOMPOSITION_P (decl) && TREE_CODE (type) == ARRAY_TYPE))
	{
	  init_code = build_aggr_init_full_exprs (decl, init, flags);

	  /* A constructor call is a non-trivial initializer even if
	     it isn't explicitly written.  */
	  if (TREE_SIDE_EFFECTS (init_code))
	    DECL_NONTRIVIALLY_INITIALIZED_P (decl) = true;

	  /* If this is a constexpr initializer, expand_default_init will
	     have returned an INIT_EXPR rather than a CALL_EXPR.  In that
	     case, pull the initializer back out and pass it down into
	     store_init_value.  */
	  while (TREE_CODE (init_code) == EXPR_STMT
		 || TREE_CODE (init_code) == CONVERT_EXPR)
	    init_code = TREE_OPERAND (init_code, 0);
	  if (TREE_CODE (init_code) == INIT_EXPR)
	    {
	      /* In C++20, the call to build_aggr_init could have created
		 an INIT_EXPR with a CONSTRUCTOR as the RHS to handle
		 A(1, 2).  */
	      init = TREE_OPERAND (init_code, 1);
	      init_code = NULL_TREE;
	      /* Don't call digest_init; it's unnecessary and will complain
		 about aggregate initialization of non-aggregate classes.
   */
	      flags |= LOOKUP_ALREADY_DIGESTED;
	    }
	  else if (DECL_DECLARED_CONSTEXPR_P (decl)
		   || (flags & LOOKUP_CONSTINIT))
	    {
	      /* Declared constexpr or constinit, but no suitable initializer;
		 massage init appropriately so we can pass it into
		 store_init_value for the error.  */
	      if (CLASS_TYPE_P (type)
		  && (!init || TREE_CODE (init) == TREE_LIST))
		{
		  init = build_functional_cast (input_location, type,
						init, tf_none);
		  if (TREE_CODE (init) == TARGET_EXPR)
		    TARGET_EXPR_DIRECT_INIT_P (init) = true;
		}
	      init_code = NULL_TREE;
	    }
	  else
	    init = NULL_TREE;
	}

      if (init && TREE_CODE (init) != TREE_VEC)
	{
	  /* In aggregate initialization of a variable, each element
	     initialization is a full-expression because there is no
	     enclosing expression.  */
	  gcc_assert (stmts_are_full_exprs_p ());

	  init_code = store_init_value (decl, init, cleanups, flags);

	  /* Complete the type of a trailing flexible-array-member
	     initializer, if any.  */
	  if (DECL_INITIAL (decl)
	      && TREE_CODE (DECL_INITIAL (decl)) == CONSTRUCTOR
	      && !vec_safe_is_empty (CONSTRUCTOR_ELTS (DECL_INITIAL (decl))))
	    {
	      tree elt = CONSTRUCTOR_ELTS (DECL_INITIAL (decl))->last ().value;
	      if (TREE_CODE (TREE_TYPE (elt)) == ARRAY_TYPE
		  && TYPE_SIZE (TREE_TYPE (elt)) == NULL_TREE)
		cp_complete_array_type (&TREE_TYPE (elt), elt, false);
	    }

	  if (pedantic && TREE_CODE (type) == ARRAY_TYPE
	      && DECL_INITIAL (decl)
	      && TREE_CODE (DECL_INITIAL (decl)) == STRING_CST
	      && PAREN_STRING_LITERAL_P (DECL_INITIAL (decl)))
	    warning_at (cp_expr_loc_or_loc (DECL_INITIAL (decl),
					    DECL_SOURCE_LOCATION (decl)),
			0, "array %qD initialized by parenthesized "
			"string literal %qE",
			decl, DECL_INITIAL (decl));
	  init = NULL_TREE;
	}
    }
  else
    {
      if (CLASS_TYPE_P (core_type = strip_array_types (type))
	  && (CLASSTYPE_READONLY_FIELDS_NEED_INIT (core_type)
	      || CLASSTYPE_REF_FIELDS_NEED_INIT (core_type)))
	diagnose_uninitialized_cst_or_ref_member (core_type,
						  /*using_new=*/false,
						  /*complain=*/true);

      check_for_uninitialized_const_var (decl, /*constexpr_context_p=*/false,
					 tf_warning_or_error);
    }

  if (init && init != error_mark_node)
    init_code = build2 (INIT_EXPR, type, decl, init);

  if (init_code)
    {
      /*
We might have set these in cp_finish_decl.  */
      DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = false;
      TREE_CONSTANT (decl) = false;
    }

  if (init_code
      && DECL_IN_AGGR_P (decl)
      && DECL_INITIALIZED_IN_CLASS_P (decl))
    {
      static int explained = 0;

      if (cxx_dialect < cxx11)
	error ("initializer invalid for static member with constructor");
      else if (cxx_dialect < cxx17)
	error ("non-constant in-class initialization invalid for static "
	       "member %qD", decl);
      else
	error ("non-constant in-class initialization invalid for non-inline "
	       "static member %qD", decl);
      if (!explained)
	{
	  inform (input_location,
		  "(an out of class initialization is required)");
	  explained = 1;
	}
      return NULL_TREE;
    }

  return init_code;
}

/* If DECL is not a local variable, give it RTL.  */

static void
make_rtl_for_nonlocal_decl (tree decl, tree init, const char* asmspec)
{
  int toplev = toplevel_bindings_p ();
  int defer_p;

  /* Set the DECL_ASSEMBLER_NAME for the object.  */
  if (asmspec)
    {
      /* The `register' keyword, when used together with an
	 asm-specification, indicates that the variable should be
	 placed in a particular register.  */
      if (VAR_P (decl) && DECL_REGISTER (decl))
	{
	  set_user_assembler_name (decl, asmspec);
	  DECL_HARD_REGISTER (decl) = 1;
	}
      else
	{
	  if (TREE_CODE (decl) == FUNCTION_DECL
	      && fndecl_built_in_p (decl, BUILT_IN_NORMAL))
	    set_builtin_user_assembler_name (decl, asmspec);
	  set_user_assembler_name (decl, asmspec);
	}
    }

  /* Handle non-variables up front.  */
  if (!VAR_P (decl))
    {
      rest_of_decl_compilation (decl, toplev, at_eof);
      return;
    }

  /* If we see a class member here, it should be a static data
     member.  */
  if (DECL_LANG_SPECIFIC (decl) && DECL_IN_AGGR_P (decl))
    {
      gcc_assert (TREE_STATIC (decl));
      /* An in-class declaration of a static data member should be
	 external; it is only a declaration, and not a definition.  */
      if (init == NULL_TREE)
	gcc_assert (DECL_EXTERNAL (decl) || !TREE_PUBLIC (decl));
    }

  /* We don't create any RTL for local variables.
   */
  if (DECL_FUNCTION_SCOPE_P (decl) && !TREE_STATIC (decl))
    return;

  /* We defer emission of local statics until the corresponding
     DECL_EXPR is expanded.  But with constexpr its function might never
     be expanded, so go ahead and tell cgraph about the variable now.  */
  defer_p = ((DECL_FUNCTION_SCOPE_P (decl)
	      && !var_in_maybe_constexpr_fn (decl))
	     || DECL_VIRTUAL_P (decl));

  /* Defer template instantiations.  */
  if (DECL_LANG_SPECIFIC (decl)
      && DECL_IMPLICIT_INSTANTIATION (decl))
    defer_p = 1;

  /* If we're not deferring, go ahead and assemble the variable.  */
  if (!defer_p)
    rest_of_decl_compilation (decl, toplev, at_eof);
}

/* walk_tree helper for wrap_temporary_cleanups, below.  */

static tree
wrap_cleanups_r (tree *stmt_p, int *walk_subtrees, void *data)
{
  /* Stop at types or full-expression boundaries.  */
  if (TYPE_P (*stmt_p)
      || TREE_CODE (*stmt_p) == CLEANUP_POINT_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  if (TREE_CODE (*stmt_p) == TARGET_EXPR)
    {
      /* DATA is the guard cleanup passed down by
	 wrap_temporary_cleanups.  */
      tree guard = (tree)data;
      tree tcleanup = TARGET_EXPR_CLEANUP (*stmt_p);

      tcleanup = build2 (TRY_CATCH_EXPR, void_type_node, tcleanup, guard);
      /* Tell honor_protect_cleanup_actions to handle this as a separate
	 cleanup.  */
      TRY_CATCH_IS_CLEANUP (tcleanup) = 1;
      TARGET_EXPR_CLEANUP (*stmt_p) = tcleanup;
    }

  return NULL_TREE;
}

/* We're initializing a local variable which has a cleanup GUARD.  If there
   are any temporaries used in the initializer INIT of this variable, we
   need to wrap their cleanups with TRY_CATCH_EXPR (, GUARD) so that the
   variable will be cleaned up properly if one of them throws.

   Unfortunately, there's no way to express this properly in terms of
   nesting, as the regions for the temporaries overlap the region for the
   variable itself; if there are two temporaries, the variable needs to be
   the first thing destroyed if either of them throws.  However, we only
   want to run the variable's cleanup if it actually got constructed.
So we need to guard the temporary cleanups with the variable's cleanup if
   they are run on the normal path, but not if they are run on the
   exceptional path.  We implement this by telling
   honor_protect_cleanup_actions to strip the variable cleanup from the
   exceptional path.  */

static void
wrap_temporary_cleanups (tree init, tree guard)
{
  cp_walk_tree_without_duplicates (&init, wrap_cleanups_r, (void *)guard);
}

/* Generate code to initialize DECL (a local variable).  */

static void
initialize_local_var (tree decl, tree init)
{
  tree type = TREE_TYPE (decl);
  tree cleanup;
  int already_used;

  gcc_assert (VAR_P (decl)
	      || TREE_CODE (decl) == RESULT_DECL);
  gcc_assert (!TREE_STATIC (decl));

  if (DECL_SIZE (decl) == NULL_TREE)
    {
      /* If we used it already as memory, it must stay in memory.  */
      DECL_INITIAL (decl) = NULL_TREE;
      TREE_ADDRESSABLE (decl) = TREE_USED (decl);
      return;
    }

  if (type == error_mark_node)
    return;

  /* Compute and store the initial value.  */
  already_used = TREE_USED (decl) || TREE_USED (type);
  if (TREE_USED (type))
    DECL_READ_P (decl) = 1;

  /* Generate a cleanup, if necessary.  */
  cleanup = cxx_maybe_build_cleanup (decl, tf_warning_or_error);

  /* Perform the initialization.  */
  if (init)
    {
      tree rinit = (TREE_CODE (init) == INIT_EXPR
		    ? TREE_OPERAND (init, 1) : NULL_TREE);
      if (rinit && !TREE_SIDE_EFFECTS (rinit))
	{
	  /* Stick simple initializers in DECL_INITIAL so that
	     -Wno-init-self works (c++/34772).  */
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	  DECL_INITIAL (decl) = rinit;

	  if (warn_init_self && TYPE_REF_P (type))
	    {
	      STRIP_NOPS (rinit);
	      if (rinit == decl)
		warning_at (DECL_SOURCE_LOCATION (decl),
			    OPT_Winit_self,
			    "reference %qD is initialized with itself", decl);
	    }
	}
      else
	{
	  int saved_stmts_are_full_exprs_p;

	  /* If we're only initializing a single object, guard the
	     destructors of any temporaries used in its initializer with
	     its destructor.  This isn't right for arrays because each
	     element initialization is a full-expression.
   */
	  if (cleanup && TREE_CODE (type) != ARRAY_TYPE)
	    wrap_temporary_cleanups (init, cleanup);

	  gcc_assert (building_stmt_list_p ());
	  saved_stmts_are_full_exprs_p = stmts_are_full_exprs_p ();
	  current_stmt_tree ()->stmts_are_full_exprs_p = 1;
	  finish_expr_stmt (init);
	  current_stmt_tree ()->stmts_are_full_exprs_p =
	    saved_stmts_are_full_exprs_p;
	}
    }

  /* Set this to 0 so we can tell whether an aggregate which was
     initialized was ever used.  Don't do this if it has a
     destructor, so we don't complain about the 'resource allocation
     is initialization' idiom.  Now set attribute((unused)) on types
     so decls of that type will be marked used.  (see TREE_USED, above.)  */
  if (TYPE_NEEDS_CONSTRUCTING (type)
      && ! already_used
      && TYPE_HAS_TRIVIAL_DESTRUCTOR (type)
      && DECL_NAME (decl))
    TREE_USED (decl) = 0;
  else if (already_used)
    TREE_USED (decl) = 1;

  if (cleanup)
    finish_decl_cleanup (decl, cleanup);
}

/* DECL is a VAR_DECL for a compiler-generated variable with static
   storage duration (like a virtual table) whose initializer is a
   compile-time constant.  Initialize the variable and provide it to the
   back end.  */

void
initialize_artificial_var (tree decl, vec<constructor_elt, va_gc> *v)
{
  tree init;
  gcc_assert (DECL_ARTIFICIAL (decl));
  init = build_constructor (TREE_TYPE (decl), v);
  gcc_assert (TREE_CODE (init) == CONSTRUCTOR);
  DECL_INITIAL (decl) = init;
  DECL_INITIALIZED_P (decl) = 1;
  /* Mark the decl as constexpr so that we can access its content
     at compile time.  */
  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true;
  DECL_DECLARED_CONSTEXPR_P (decl) = true;
  determine_visibility (decl);
  layout_var_decl (decl);
  maybe_commonize_var (decl);
  make_rtl_for_nonlocal_decl (decl, init, /*asmspec=*/NULL);
}

/* INIT is the initializer for a variable, as represented by the
   parser.  Returns true iff INIT is value-dependent.  */

static bool
value_dependent_init_p (tree init)
{
  if (TREE_CODE (init) == TREE_LIST)
    /* A parenthesized initializer, e.g.: int i (3, 2); ?
*/
    return any_value_dependent_elements_p (init);
  else if (TREE_CODE (init) == CONSTRUCTOR)
  /* A brace-enclosed initializer, e.g.: int i = { 3 }; ? */
    {
      if (dependent_type_p (TREE_TYPE (init)))
	return true;

      vec<constructor_elt, va_gc> *elts;
      size_t nelts;
      size_t i;

      /* Recurse into each element of the braced list.  */
      elts = CONSTRUCTOR_ELTS (init);
      nelts = vec_safe_length (elts);
      for (i = 0; i < nelts; ++i)
	if (value_dependent_init_p ((*elts)[i].value))
	  return true;
    }
  else
    /* It must be a simple expression, e.g., int i = 3;  */
    return value_dependent_expression_p (init);

  return false;
}

// Returns true if a DECL is VAR_DECL with the concept specifier.
static inline bool
is_concept_var (tree decl)
{
  return (VAR_P (decl)
	  // Not all variables have DECL_LANG_SPECIFIC.
	  && DECL_LANG_SPECIFIC (decl)
	  && DECL_DECLARED_CONCEPT_P (decl));
}

/* A helper function to be called via walk_tree.  If any label exists
   under *TP, it is (going to be) forced.  Set has_forced_label_in_static.  */

static tree
notice_forced_label_r (tree *tp, int *walk_subtrees, void *)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (TREE_CODE (*tp) == LABEL_DECL)
    cfun->has_forced_label_in_static = 1;
  return NULL_TREE;
}

/* Return true if DECL has either a trivial destructor, or for C++2A
   is constexpr and has a constexpr destructor.  */

static bool
decl_maybe_constant_destruction (tree decl, tree type)
{
  return (TYPE_HAS_TRIVIAL_DESTRUCTOR (type)
	  || (cxx_dialect >= cxx2a
	      && VAR_P (decl)
	      && DECL_DECLARED_CONSTEXPR_P (decl)
	      && type_has_constexpr_destructor (strip_array_types (type))));
}

static tree declare_simd_adjust_this (tree *, int *, void *);

/* Helper function of omp_declare_variant_finalize.  Finalize one
   "omp declare variant base" attribute.  Return true if it should be
   removed.
*/

static bool
omp_declare_variant_finalize_one (tree decl, tree attr)
{
  /* For member functions, skip the implicit `this' when adjusting the
     clause argument references.  */
  if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
    {
      walk_tree (&TREE_VALUE (TREE_VALUE (attr)), declare_simd_adjust_this,
		 DECL_ARGUMENTS (decl), NULL);
      walk_tree (&TREE_PURPOSE (TREE_VALUE (attr)), declare_simd_adjust_this,
		 DECL_ARGUMENTS (decl), NULL);
    }

  tree ctx = TREE_VALUE (TREE_VALUE (attr));
  tree simd = omp_get_context_selector (ctx, "construct", "simd");
  if (simd)
    {
      TREE_VALUE (simd)
	= c_omp_declare_simd_clauses_to_numbers (DECL_ARGUMENTS (decl),
						 TREE_VALUE (simd));
      /* FIXME, adjusting simd args unimplemented.  */
      return true;
    }

  tree chain = TREE_CHAIN (TREE_VALUE (attr));
  location_t varid_loc
    = cp_expr_loc_or_input_loc (TREE_PURPOSE (TREE_CHAIN (chain)));
  location_t match_loc = cp_expr_loc_or_input_loc (TREE_PURPOSE (chain));
  cp_id_kind idk = (cp_id_kind) tree_to_uhwi (TREE_VALUE (chain));
  tree variant = TREE_PURPOSE (TREE_VALUE (attr));

  location_t save_loc = input_location;
  input_location = varid_loc;

  /* Build a dummy argument list matching DECL's parameters so overload
     resolution can pick the variant.  */
  releasing_vec args;
  tree parm = DECL_ARGUMENTS (decl);
  if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
    parm = DECL_CHAIN (parm);
  for (; parm; parm = DECL_CHAIN (parm))
    if (type_dependent_expression_p (parm))
      vec_safe_push (args, build_constructor (TREE_TYPE (parm), NULL));
    else if (MAYBE_CLASS_TYPE_P (TREE_TYPE (parm)))
      vec_safe_push (args, build_local_temp (TREE_TYPE (parm)));
    else
      vec_safe_push (args, build_zero_cst (TREE_TYPE (parm)));

  bool koenig_p = false;
  if (idk == CP_ID_KIND_UNQUALIFIED || idk == CP_ID_KIND_TEMPLATE_ID)
    {
      if (identifier_p (variant)
	  /* In C++2A, we may need to perform ADL for a template
	     name.
   */
	  || (TREE_CODE (variant) == TEMPLATE_ID_EXPR
	      && identifier_p (TREE_OPERAND (variant, 0))))
	{
	  if (!args->is_empty ())
	    {
	      koenig_p = true;
	      if (!any_type_dependent_arguments_p (args))
		variant = perform_koenig_lookup (variant, args,
						 tf_warning_or_error);
	    }
	  else
	    variant = unqualified_fn_lookup_error (variant);
	}
      else if (!args->is_empty () && is_overloaded_fn (variant))
	{
	  tree fn = get_first_fn (variant);
	  fn = STRIP_TEMPLATE (fn);
	  if (!((TREE_CODE (fn) == USING_DECL && DECL_DEPENDENT_P (fn))
		|| DECL_FUNCTION_MEMBER_P (fn)
		|| DECL_LOCAL_FUNCTION_P (fn)))
	    {
	      koenig_p = true;
	      if (!any_type_dependent_arguments_p (args))
		variant = perform_koenig_lookup (variant, args,
						 tf_warning_or_error);
	    }
	}
    }

  if (idk == CP_ID_KIND_QUALIFIED)
    variant = finish_call_expr (variant, &args, /*disallow_virtual=*/true,
				koenig_p, tf_warning_or_error);
  else
    variant = finish_call_expr (variant, &args, /*disallow_virtual=*/false,
				koenig_p, tf_warning_or_error);
  if (variant == error_mark_node && !processing_template_decl)
    return true;

  variant = cp_get_callee_fndecl_nofold (variant);
  input_location = save_loc;

  if (variant)
    {
      const char *varname = IDENTIFIER_POINTER (DECL_NAME (variant));
      if (!comptypes (TREE_TYPE (decl), TREE_TYPE (variant), 0))
	{
	  error_at (varid_loc, "variant %qD and base %qD have incompatible "
			       "types", variant, decl);
	  return true;
	}
      if (fndecl_built_in_p (variant)
	  && (strncmp (varname, "__builtin_", strlen ("__builtin_")) == 0
	      || strncmp (varname, "__sync_", strlen ("__sync_")) == 0
	      || strncmp (varname, "__atomic_", strlen ("__atomic_")) == 0))
	{
	  error_at (varid_loc, "variant %qD is a built-in", variant);
	  return true;
	}
      else
	{
	  tree construct = omp_get_context_selector (ctx, "construct", NULL);
	  c_omp_mark_declare_variant (match_loc, variant, construct);
	  if (!omp_context_selector_matches (ctx))
	    return true;
	  /* Record the resolved variant FUNCTION_DECL in the attribute.  */
	  TREE_PURPOSE (TREE_VALUE (attr)) = variant;
	}
    }
  else if (!processing_template_decl)
    {
      error_at (varid_loc, "could not find variant declaration");
      return true;
    }

  return false;
}

/*
   Helper function, finish up "omp declare variant base"
   attribute now that there is a DECL.  ATTR is the first
   "omp declare variant base" attribute.  */

void
omp_declare_variant_finalize (tree decl, tree attr)
{
  size_t attr_len = strlen ("omp declare variant base");
  /* Pointer-to-pointer walk so erroneous attributes can be unlinked
     in place below.  */
  tree *list = &DECL_ATTRIBUTES (decl);
  bool remove_all = false;
  location_t match_loc = DECL_SOURCE_LOCATION (decl);
  /* Prefer the location recorded with the match clause, if any.  */
  if (TREE_CHAIN (TREE_VALUE (attr))
      && TREE_PURPOSE (TREE_CHAIN (TREE_VALUE (attr)))
      && EXPR_HAS_LOCATION (TREE_PURPOSE (TREE_CHAIN (TREE_VALUE (attr)))))
    match_loc = EXPR_LOCATION (TREE_PURPOSE (TREE_CHAIN (TREE_VALUE (attr))));

  /* Kinds of functions on which `declare variant' is not allowed at all;
     diagnose once and then strip every such attribute.  */
  if (DECL_CONSTRUCTOR_P (decl))
    {
      error_at (match_loc, "%<declare variant%> on constructor %qD", decl);
      remove_all = true;
    }
  else if (DECL_DESTRUCTOR_P (decl))
    {
      error_at (match_loc, "%<declare variant%> on destructor %qD", decl);
      remove_all = true;
    }
  else if (DECL_DEFAULTED_FN (decl))
    {
      error_at (match_loc, "%<declare variant%> on defaulted %qD", decl);
      remove_all = true;
    }
  else if (DECL_DELETED_FN (decl))
    {
      error_at (match_loc, "%<declare variant%> on deleted %qD", decl);
      remove_all = true;
    }
  else if (DECL_VIRTUAL_P (decl))
    {
      error_at (match_loc, "%<declare variant%> on virtual %qD", decl);
      remove_all = true;
    }
  /* This loop is like private_lookup_attribute, except that it works
     with tree * rather than tree, as we might want to remove the
     attributes that are diagnosed as erroneous.  */
  while (*list)
    {
      /* NOTE: this ATTR intentionally shadows the parameter; here it is
	 the identifier naming the current attribute in the chain.  */
      tree attr = get_attribute_name (*list);
      size_t ident_len = IDENTIFIER_LENGTH (attr);
      if (cmp_attribs ("omp declare variant base", attr_len,
		       IDENTIFIER_POINTER (attr), ident_len))
	{
	  /* Unlink the attribute if everything is being removed or if
	     finalizing this one attribute reported an error.  */
	  if (remove_all || omp_declare_variant_finalize_one (decl, *list))
	    {
	      *list = TREE_CHAIN (*list);
	      continue;
	    }
	}
      list = &TREE_CHAIN (*list);
    }
}

/* Finish processing of a declaration;
   install its line number and initial value.
   If the length of an array type is not known before,
   it must be determined now, from the initial value, or it is an error.

   INIT is the initializer (if any) for DECL.
   If INIT_CONST_EXPR_P is true, then INIT is an integral constant
   expression.

   FLAGS is LOOKUP_ONLYCONVERTING if the = init syntax was used, else 0
   if the (init) syntax was used.  */

void
cp_finish_decl (tree decl, tree init, bool init_const_expr_p,
		tree asmspec_tree, int flags)
{
  tree type;
  /* Holds CLEANUP_STMT candidates produced while processing the
     initializer; flushed into the statement-tree near the end.  */
  vec<tree, va_gc> *cleanups = NULL;
  const char *asmspec = NULL;
  /* Set when TREE_READONLY is temporarily cleared so a reference (or a
     runtime-initialized variable) can be stored to; restored below.  */
  int was_readonly = 0;
  bool var_definition_p = false;
  tree auto_node;

  if (decl == error_mark_node)
    return;
  else if (! decl)
    {
      if (init)
	error ("assignment (not initialization) in declaration");
      return;
    }

  gcc_assert (TREE_CODE (decl) != RESULT_DECL);
  /* Parameters are handled by store_parm_decls, not cp_finish_decl.  */
  gcc_assert (TREE_CODE (decl) != PARM_DECL);

  type = TREE_TYPE (decl);
  if (type == error_mark_node)
    return;

  /* Warn about register storage specifiers except when in GNU global
     or local register variable extension.  */
  if (VAR_P (decl) && DECL_REGISTER (decl) && asmspec_tree == NULL_TREE)
    {
      if (cxx_dialect >= cxx17)
	pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wregister,
		 "ISO C++17 does not allow %<register%> storage "
		 "class specifier");
      else
	warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wregister,
		    "%<register%> storage class specifier used");
    }

  /* If a name was specified, get the string.  */
  if (at_namespace_scope_p ())
    asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
  if (asmspec_tree && asmspec_tree != error_mark_node)
    asmspec = TREE_STRING_POINTER (asmspec_tree);

  if (current_class_type
      && CP_DECL_CONTEXT (decl) == current_class_type
      && TYPE_BEING_DEFINED (current_class_type)
      && !CLASSTYPE_TEMPLATE_INSTANTIATION (current_class_type)
      && (DECL_INITIAL (decl) || init))
    DECL_INITIALIZED_IN_CLASS_P (decl) = 1;

  /* Do auto deduction unless decl is a function or an uninstantiated
     template specialization.  */
  if (TREE_CODE (decl) != FUNCTION_DECL
      && !(init == NULL_TREE
	   && DECL_LANG_SPECIFIC (decl)
	   && DECL_TEMPLATE_INSTANTIATION (decl)
	   && !DECL_TEMPLATE_INSTANTIATED (decl))
      && (auto_node = type_uses_auto (type)))
    {
      tree d_init;
      if (init == NULL_TREE)
	/* No initializer is only OK for class template argument
	   deduction placeholders.  */
	gcc_assert (CLASS_PLACEHOLDER_TEMPLATE (auto_node));
      d_init = init;
      if (d_init)
	{
	  if (TREE_CODE (d_init) == TREE_LIST
	      && !CLASS_PLACEHOLDER_TEMPLATE (auto_node))
	    d_init = build_x_compound_expr_from_list (d_init, ELK_INIT,
						      tf_warning_or_error);
	  d_init = resolve_nondeduced_context (d_init, tf_warning_or_error);
	}
      enum auto_deduction_context adc = adc_variable_type;
      if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl))
	adc = adc_decomp_type;
      type = TREE_TYPE (decl) = do_auto_deduction (type, d_init, auto_node,
						   tf_warning_or_error, adc,
						   NULL_TREE, flags);
      if (type == error_mark_node)
	return;
      if (TREE_CODE (type) == FUNCTION_TYPE)
	{
	  error ("initializer for %<decltype(auto) %D%> has function type; "
		 "did you forget the %<()%>?", decl);
	  TREE_TYPE (decl) = error_mark_node;
	  return;
	}
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
    }

  if (ensure_literal_type_for_constexpr_object (decl) == error_mark_node)
    {
      DECL_DECLARED_CONSTEXPR_P (decl) = 0;
      if (VAR_P (decl) && DECL_CLASS_SCOPE_P (decl))
	{
	  init = NULL_TREE;
	  DECL_EXTERNAL (decl) = 1;
	}
    }

  if (VAR_P (decl)
      && DECL_CLASS_SCOPE_P (decl)
      && verify_type_context (DECL_SOURCE_LOCATION (decl),
			      TCTX_STATIC_STORAGE, type)
      && DECL_INITIALIZED_IN_CLASS_P (decl))
    check_static_variable_definition (decl, type);

  /* Handle `= delete' and `= default' "initializers" on functions.  */
  if (init && TREE_CODE (decl) == FUNCTION_DECL)
    {
      tree clone;
      if (init == ridpointers[(int)RID_DELETE])
	{
	  /* FIXME check this is 1st decl.  */
	  DECL_DELETED_FN (decl) = 1;
	  DECL_DECLARED_INLINE_P (decl) = 1;
	  DECL_INITIAL (decl) = error_mark_node;
	  FOR_EACH_CLONE (clone, decl)
	    {
	      DECL_DELETED_FN (clone) = 1;
	      DECL_DECLARED_INLINE_P (clone) = 1;
	      DECL_INITIAL (clone) = error_mark_node;
	    }
	  init = NULL_TREE;
	}
      else if (init == ridpointers[(int)RID_DEFAULT])
	{
	  if (defaultable_fn_check (decl))
	    DECL_DEFAULTED_FN (decl) = 1;
	  else
	    DECL_INITIAL (decl) = NULL_TREE;
	}
    }

  if (init && VAR_P (decl))
    {
      DECL_NONTRIVIALLY_INITIALIZED_P (decl) = 1;
      /* If DECL is a reference, then we want to know whether init is a
	 reference constant; init_const_expr_p as passed tells us whether
	 it's an rvalue constant.  */
      if (TYPE_REF_P (type))
	init_const_expr_p = potential_constant_expression (init);
      if (init_const_expr_p)
	{
	  /* Set these flags now for templates.  We'll update the flags in
	     store_init_value for instantiations.  */
	  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = 1;
	  if (decl_maybe_constant_var_p (decl)
	      /* FIXME setting TREE_CONSTANT on refs breaks the back end.  */
	      && !TYPE_REF_P (type))
	    TREE_CONSTANT (decl) = 1;
	}
    }

  if (flag_openmp
      && TREE_CODE (decl) == FUNCTION_DECL
      /* #pragma omp declare variant on methods handled in finish_struct
	 instead.  */
      && (!DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)
	  || COMPLETE_TYPE_P (DECL_CONTEXT (decl))))
    if (tree attr = lookup_attribute ("omp declare variant base",
				      DECL_ATTRIBUTES (decl)))
      omp_declare_variant_finalize (decl, attr);

  if (processing_template_decl)
    {
      bool type_dependent_p;

      /* Add this declaration to the statement-tree.  */
      if (at_function_scope_p ())
	add_decl_expr (decl);

      type_dependent_p = dependent_type_p (type);

      if (check_for_bare_parameter_packs (init))
	{
	  init = NULL_TREE;
	  DECL_INITIAL (decl) = NULL_TREE;
	}

      /* Handle `constinit' on variable templates.  */
      if (flags & LOOKUP_CONSTINIT)
	TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (decl)) = true;

      /* Generally, initializers in templates are expanded when the
	 template is instantiated.  But, if DECL is a variable constant
	 then it can be used in future constant expressions, so its value
	 must be available.  */

      bool dep_init = false;

      if (!VAR_P (decl) || type_dependent_p)
	/* We can't do anything if the decl has dependent type.  */;
      else if (!init && is_concept_var (decl))
	{
	  error ("variable concept has no initializer");
	  init = boolean_true_node;
	}
      else if (init
	       && init_const_expr_p
	       && !TYPE_REF_P (type)
	       && decl_maybe_constant_var_p (decl)
	       && !(dep_init = value_dependent_init_p (init)))
	{
	  /* This variable seems to be a non-dependent constant, so process
	     its initializer.  If check_initializer returns non-null the
	     initialization wasn't constant after all.  */
	  tree init_code;
	  cleanups = make_tree_vector ();
	  init_code = check_initializer (decl, init, flags, &cleanups);
	  if (init_code == NULL_TREE)
	    init = NULL_TREE;
	  release_tree_vector (cleanups);
	}
      else
	{
	  gcc_assert (!DECL_PRETTY_FUNCTION_P (decl));
	  /* Deduce array size even if the initializer is dependent.  */
	  maybe_deduce_size_from_array_init (decl, init);
	  /* And complain about multiple initializers.  */
	  if (init && TREE_CODE (init) == TREE_LIST && TREE_CHAIN (init)
	      && !MAYBE_CLASS_TYPE_P (type))
	    init = build_x_compound_expr_from_list (init, ELK_INIT,
						    tf_warning_or_error);
	}

      if (init)
	DECL_INITIAL (decl) = init;

      if (dep_init)
	{
	  retrofit_lang_decl (decl);
	  SET_DECL_DEPENDENT_INIT_P (decl, true);
	}

      if (VAR_P (decl) && DECL_REGISTER (decl) && asmspec)
	{
	  set_user_assembler_name (decl, asmspec);
	  DECL_HARD_REGISTER (decl) = 1;
	}
      /* Template case ends here; the rest is for real declarations.  */
      return;
    }

  /* Just store non-static data member initializers for later.  */
  if (init && TREE_CODE (decl) == FIELD_DECL)
    DECL_INITIAL (decl) = init;

  /* Take care of TYPE_DECLs up front.  */
  if (TREE_CODE (decl) == TYPE_DECL)
    {
      if (type != error_mark_node
	  && MAYBE_CLASS_TYPE_P (type) && DECL_NAME (decl))
	{
	  if (TREE_TYPE (DECL_NAME (decl)) && TREE_TYPE (decl) != type)
	    warning (0, "shadowing previous type declaration of %q#D", decl);
	  set_identifier_type_value (DECL_NAME (decl), decl);
	}

      /* If we have installed this as the canonical typedef for this
	 type, and that type has not been defined yet, delay emitting
	 the debug information for it, as we will emit it later.  */
      if (TYPE_MAIN_DECL (TREE_TYPE (decl)) == decl
	  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
	TYPE_DECL_SUPPRESS_DEBUG (decl) = 1;

      rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl),
				at_eof);
      return;
    }

  /* A reference will be modified here, as it is initialized.  */
  if (! DECL_EXTERNAL (decl)
      && TREE_READONLY (decl)
      && TYPE_REF_P (type))
    {
      was_readonly = 1;
      TREE_READONLY (decl) = 0;
    }

  /* This needs to happen before extend_ref_init_temps.  */
  if (VAR_OR_FUNCTION_DECL_P (decl))
    {
      if (VAR_P (decl))
	maybe_commonize_var (decl);
      determine_visibility (decl);
    }

  if (VAR_P (decl))
    {
      duration_kind dk = decl_storage_duration (decl);
      /* [dcl.constinit]/1 "The constinit specifier shall be applied
	 only to a declaration of a variable with static or thread storage
	 duration."  */
      if ((flags & LOOKUP_CONSTINIT) && !(dk == dk_thread || dk == dk_static))
	{
	  error_at (DECL_SOURCE_LOCATION (decl),
		    "%<constinit%> can only be applied to a variable with "
		    "static or thread storage duration");
	  return;
	}

      /* If this is a local variable that will need a mangled name,
	 register it now.  We must do this before processing the
	 initializer for the variable, since the initialization might
	 require a guard variable, and since the mangled name of the
	 guard variable will depend on the mangled name of this
	 variable.  */
      if (DECL_FUNCTION_SCOPE_P (decl)
	  && TREE_STATIC (decl)
	  && !DECL_ARTIFICIAL (decl))
	{
	  /* The variable holding an anonymous union will have had its
	     discriminator set in finish_anon_union, after which its
	     NAME will have been cleared.  */
	  if (DECL_NAME (decl))
	    determine_local_discriminator (decl);
	  /* Normally has_forced_label_in_static is set during GIMPLE
	     lowering, but [cd]tors are never actually compiled directly.
	     We need to set this early so we can deal with the label
	     address extension.  */
	  if ((DECL_CONSTRUCTOR_P (current_function_decl)
	       || DECL_DESTRUCTOR_P (current_function_decl))
	      && init)
	    {
	      walk_tree (&init, notice_forced_label_r, NULL, NULL);
	      add_local_decl (cfun, decl);
	    }
	  /* And make sure it's in the symbol table for
	     c_parse_final_cleanups to find.  */
	  varpool_node::get_create (decl);
	}

      /* Convert the initializer to the type of DECL, if we have not
	 already initialized DECL.  */
      if (!DECL_INITIALIZED_P (decl)
	  /* If !DECL_EXTERNAL then DECL is being defined.  In the
	     case of a static data member initialized inside the
	     class-specifier, there can be an initializer even if DECL
	     is *not* defined.  */
	  && (!DECL_EXTERNAL (decl) || init))
	{
	  cleanups = make_tree_vector ();
	  init = check_initializer (decl, init, flags, &cleanups);

	  /* Handle:

	     [dcl.init]

	     The memory occupied by any object of static storage
	     duration is zero-initialized at program startup before
	     any other initialization takes place.

	     We cannot create an appropriate initializer until after
	     the type of DECL is finalized.  If DECL_INITIAL is set,
	     then the DECL is statically initialized, and any
	     necessary zero-initialization has already been performed.  */
	  if (TREE_STATIC (decl) && !DECL_INITIAL (decl))
	    DECL_INITIAL (decl)
	      = build_zero_init (TREE_TYPE (decl),
				 /*nelts=*/NULL_TREE,
				 /*static_storage_p=*/true);
	  /* Remember that the initialization for this variable has
	     taken place.  */
	  DECL_INITIALIZED_P (decl) = 1;
	  /* This declaration is the definition of this variable,
	     unless we are initializing a static data member within
	     the class specifier.  */
	  if (!DECL_EXTERNAL (decl))
	    var_definition_p = true;
	}
      /* If the variable has an array type, lay out the type, even if
	 there is no initializer.  It is valid to index through the
	 array, and we must get TYPE_ALIGN set correctly on the array
	 type.  */
      else if (TREE_CODE (type) == ARRAY_TYPE)
	layout_type (type);

      if (TREE_STATIC (decl)
	  && !at_function_scope_p ()
	  && current_function_decl == NULL)
	/* So decl is a global variable or a static member of a
	   non local class.  Record the types it uses
	   so that we can decide later to emit debug info for them.  */
	record_types_used_by_current_var_decl (decl);
    }

  /* Add this declaration to the statement-tree.  This needs to happen
     after the call to check_initializer so that the DECL_EXPR for a
     reference temp is added before the DECL_EXPR for the reference itself.  */
  if (DECL_FUNCTION_SCOPE_P (decl))
    {
      /* If we're building a variable sized type, and we might be
	 reachable other than via the top of the current binding
	 level, then create a new BIND_EXPR so that we deallocate
	 the object at the right time.  */
      if (VAR_P (decl)
	  && DECL_SIZE (decl)
	  && !TREE_CONSTANT (DECL_SIZE (decl))
	  && STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
	{
	  tree bind;
	  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
	  TREE_SIDE_EFFECTS (bind) = 1;
	  add_stmt (bind);
	  BIND_EXPR_BODY (bind) = push_stmt_list ();
	}
      add_decl_expr (decl);
    }

  /* Let the middle end know about variables and functions -- but not
     static data members in uninstantiated class templates.  */
  if (VAR_OR_FUNCTION_DECL_P (decl))
    {
      if (VAR_P (decl))
	{
	  layout_var_decl (decl);
	  if (!flag_weak)
	    /* Check again now that we have an initializer.  */
	    maybe_commonize_var (decl);
	}

      if (var_definition_p
	  /* With -fmerge-all-constants, gimplify_init_constructor
	     might add TREE_STATIC to the variable.  */
	  && (TREE_STATIC (decl) || flag_merge_constants >= 2))
	{
	  /* If a TREE_READONLY variable needs initialization
	     at runtime, it is no longer readonly and we need to
	     avoid MEM_READONLY_P being set on RTL created for it.  */
	  if (init)
	    {
	      if (TREE_READONLY (decl))
		TREE_READONLY (decl) = 0;
	      was_readonly = 0;
	    }
	  else if (was_readonly)
	    TREE_READONLY (decl) = 1;

	  /* Likewise if it needs destruction.  */
	  if (!decl_maybe_constant_destruction (decl, type))
	    TREE_READONLY (decl) = 0;
	}

      make_rtl_for_nonlocal_decl (decl, init, asmspec);

      /* Check for abstractness of the type.  Notice that there is no
	 need to strip array types here since the check for those types
	 is already done within create_array_type_for_decl.  */
      abstract_virtuals_error (decl, type);

      if (TREE_TYPE (decl) == error_mark_node)
	/* No initialization required.  */
	;
      else if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  if (init)
	    {
	      if (init == ridpointers[(int)RID_DEFAULT])
		{
		  /* An out-of-class default definition is defined at
		     the point where it is explicitly defaulted.  */
		  if (DECL_DELETED_FN (decl))
		    maybe_explain_implicit_delete (decl);
		  else if (DECL_INITIAL (decl) == error_mark_node)
		    synthesize_method (decl);
		}
	      else
		error_at (cp_expr_loc_or_loc (init,
					      DECL_SOURCE_LOCATION (decl)),
			  "function %q#D is initialized like a variable",
			  decl);
	    }
	  /* else no initialization required.  */
	}
      else if (DECL_EXTERNAL (decl)
	       && ! (DECL_LANG_SPECIFIC (decl)
		     && DECL_NOT_REALLY_EXTERN (decl)))
	{
	  /* check_initializer will have done any constant initialization.  */
	}
      /* A variable definition.  */
      else if (DECL_FUNCTION_SCOPE_P (decl) && !TREE_STATIC (decl))
	/* Initialize the local variable.  */
	initialize_local_var (decl, init);

      /* If a variable is defined, and then a subsequent definition with
	 external linkage is encountered, we will get here twice for the
	 same variable.  We want to avoid calling expand_static_init more
	 than once.  For variables that are not static data members, we
	 can call expand_static_init only when we actually process the
	 initializer.  It is not legal to redeclare a static data member,
	 so this issue does not arise in that case.  */
      else if (var_definition_p && TREE_STATIC (decl))
	expand_static_init (decl, init);
    }

  /* If a CLEANUP_STMT was created to destroy a temporary bound to a
     reference, insert it in the statement-tree now.  */
  if (cleanups)
    {
      unsigned i;
      tree t;

      FOR_EACH_VEC_ELT (*cleanups, i, t)
	push_cleanup (decl, t, false);
      release_tree_vector (cleanups);
    }

  if (was_readonly)
    TREE_READONLY (decl) = 1;

  /* Promote an "omp declare target implicit" attribute into a real
     "omp declare target" one, diagnosing unmappable types.  */
  if (flag_openmp
      && VAR_P (decl)
      && lookup_attribute ("omp declare target implicit",
			   DECL_ATTRIBUTES (decl)))
    {
      DECL_ATTRIBUTES (decl)
	= remove_attribute ("omp declare target implicit",
			    DECL_ATTRIBUTES (decl));
      complete_type (TREE_TYPE (decl));
      if (!cp_omp_mappable_type (TREE_TYPE (decl)))
	{
	  error ("%q+D in declare target directive does not have mappable"
		 " type", decl);
	  cp_omp_emit_unmappable_type_notes (TREE_TYPE (decl));
	}
      else if (!lookup_attribute ("omp declare target",
				  DECL_ATTRIBUTES (decl))
	       && !lookup_attribute ("omp declare target link",
				     DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl)
	  = tree_cons (get_identifier ("omp declare target"),
		       NULL_TREE, DECL_ATTRIBUTES (decl));
    }

  /* Let plugins observe the finished declaration.  */
  invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}

/* For class TYPE return itself or some of its bases that contain
   any direct non-static data members.  Return error_mark_node if an
   error has been diagnosed.
 */

static tree
find_decomp_class_base (location_t loc, tree type, tree ret)
{
  /* RET is the class found so far (by an outer recursion level) to hold
     direct non-static data members, or NULL_TREE if none yet.  */
  bool member_seen = false;
  for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    if (TREE_CODE (field) != FIELD_DECL
	|| DECL_ARTIFICIAL (field)
	|| DECL_UNNAMED_BIT_FIELD (field))
      continue;
    else if (ret)
      /* A second class with members; the caller diagnoses the clash.  */
      return type;
    else if (ANON_AGGR_TYPE_P (TREE_TYPE (field)))
      {
	if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
	  error_at (loc, "cannot decompose class type %qT because it has an "
			 "anonymous struct member", type);
	else
	  error_at (loc, "cannot decompose class type %qT because it has an "
			 "anonymous union member", type);
	inform (DECL_SOURCE_LOCATION (field), "declared here");
	return error_mark_node;
      }
    else if (!accessible_p (type, field, true))
      {
	error_at (loc, "cannot decompose inaccessible member %qD of %qT",
		  field, type);
	inform (DECL_SOURCE_LOCATION (field),
		TREE_PRIVATE (field)
		? G_("declared private here")
		: G_("declared protected here"));
	return error_mark_node;
      }
    else
      member_seen = true;

  tree base_binfo, binfo;
  tree orig_ret = ret;
  int i;
  if (member_seen)
    ret = type;
  /* Recurse into the direct bases, checking that at most one class in
     the whole hierarchy contributes non-static data members.  */
  for (binfo = TYPE_BINFO (type), i = 0;
       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
    {
      tree t = find_decomp_class_base (loc, TREE_TYPE (base_binfo), ret);
      if (t == error_mark_node)
	return error_mark_node;
      if (t != NULL_TREE && t != ret)
	{
	  if (ret == type)
	    {
	      error_at (loc, "cannot decompose class type %qT: both it and "
			     "its base class %qT have non-static data members",
			type, t);
	      return error_mark_node;
	    }
	  else if (orig_ret != NULL_TREE)
	    return t;
	  else if (ret != NULL_TREE)
	    {
	      error_at (loc, "cannot decompose class type %qT: its base "
			     "classes %qT and %qT have non-static data "
			     "members", type, ret, t);
	      return error_mark_node;
	    }
	  else
	    ret = t;
	}
    }
  return ret;
}

/* Return std::tuple_size<TYPE>::value.
 */

/* Returns NULL_TREE if std::tuple_size<TYPE> is incomplete (i.e. TYPE is
   not tuple-like), error_mark_node if ::value is not an integer constant,
   and the INTEGER_CST otherwise.  */

static tree
get_tuple_size (tree type)
{
  tree args = make_tree_vec (1);
  TREE_VEC_ELT (args, 0) = type;
  tree inst = lookup_template_class (tuple_size_identifier, args,
				     /*in_decl*/NULL_TREE,
				     /*context*/std_node,
				     /*entering_scope*/false, tf_none);
  inst = complete_type (inst);
  if (inst == error_mark_node
      || !COMPLETE_TYPE_P (inst))
    return NULL_TREE;
  tree val = lookup_qualified_name (inst, value_identifier,
				    /*type*/false, /*complain*/false);
  if (TREE_CODE (val) == VAR_DECL || TREE_CODE (val) == CONST_DECL)
    val = maybe_constant_value (val);
  if (TREE_CODE (val) == INTEGER_CST)
    return val;
  else
    return error_mark_node;
}

/* Return std::tuple_element<I,TYPE>::type.  */

static tree
get_tuple_element_type (tree type, unsigned i)
{
  tree args = make_tree_vec (2);
  TREE_VEC_ELT (args, 0) = build_int_cst (integer_type_node, i);
  TREE_VEC_ELT (args, 1) = type;
  tree inst = lookup_template_class (tuple_element_identifier, args,
				     /*in_decl*/NULL_TREE,
				     /*context*/std_node,
				     /*entering_scope*/false,
				     tf_warning_or_error);
  return make_typename_type (inst, type_identifier,
			     none_type, tf_warning_or_error);
}

/* Return e.get<i>() or get<i>(e).  */

static tree
get_tuple_decomp_init (tree decl, unsigned i)
{
  tree targs = make_tree_vec (1);
  TREE_VEC_ELT (targs, 0) = build_int_cst (integer_type_node, i);

  tree etype = TREE_TYPE (decl);
  tree e = convert_from_reference (decl);

  /* [The id-expression] e is an lvalue if the type of the entity e is an
     lvalue reference and an xvalue otherwise.  */
  if (!TYPE_REF_P (etype)
      || TYPE_REF_IS_RVALUE (etype))
    e = move (e);

  tree fns = lookup_qualified_name (TREE_TYPE (e), get__identifier,
				    /*type*/false, /*complain*/false);
  bool use_member_get = false;

  /* To use a member get, member lookup must find at least one
     declaration that is a function template whose first template
     parameter is a non-type parameter.  */
  for (lkp_iterator iter (MAYBE_BASELINK_FUNCTIONS (fns)); iter; ++iter)
    {
      tree fn = *iter;
      if (TREE_CODE (fn) == TEMPLATE_DECL)
	{
	  tree tparms = DECL_TEMPLATE_PARMS (fn);
	  tree parm = TREE_VEC_ELT (INNERMOST_TEMPLATE_PARMS (tparms), 0);
	  if (TREE_CODE (TREE_VALUE (parm)) == PARM_DECL)
	    {
	      use_member_get = true;
	      break;
	    }
	}
    }

  if (use_member_get)
    {
      /* e.get<i>()  */
      fns = lookup_template_function (fns, targs);
      return build_new_method_call (e, fns, /*args*/NULL,
				    /*path*/NULL_TREE, LOOKUP_NORMAL,
				    /*fn_p*/NULL, tf_warning_or_error);
    }
  else
    {
      /* get<i>(e), found by argument-dependent lookup.  */
      releasing_vec args (make_tree_vector_single (e));
      fns = lookup_template_function (get__identifier, targs);
      fns = perform_koenig_lookup (fns, args, tf_warning_or_error);
      return finish_call_expr (fns, &args, /*novirt*/false,
			       /*koenig*/true, tf_warning_or_error);
    }
}

/* It's impossible to recover the decltype of a tuple decomposition variable
   based on the actual type of the variable, so store it in a hash table.  */

static GTY((cache)) decl_tree_cache_map *decomp_type_table;

/* Look up the stored decltype for decomposition variable V.  V must have
   been registered in decomp_type_table (done in the tuple case of
   cp_finish_decomp); otherwise this dereferences a null slot.  */

tree
lookup_decomp_type (tree v)
{
  return *decomp_type_table->get (v);
}

/* Mangle a decomposition declaration if needed.  Arguments like
   in cp_finish_decomp.  */

void
cp_maybe_mangle_decomp (tree decl, tree first, unsigned int count)
{
  if (!processing_template_decl
      && !error_operand_p (decl)
      && TREE_STATIC (decl))
    {
      auto_vec<tree, 16> v;
      v.safe_grow (count);
      tree d = first;
      /* FIRST chains the bindings in reverse, so fill V back to front
	 to recover declaration order.  */
      for (unsigned int i = 0; i < count; i++, d = DECL_CHAIN (d))
	v[count - i - 1] = d;
      SET_DECL_ASSEMBLER_NAME (decl, mangle_decomp (decl, v));
      maybe_apply_pragma_weak (decl);
    }
}

/* Finish a decomposition declaration.  DECL is the underlying declaration
   "e", FIRST is the head of a chain of decls for the individual identifiers
   chained through DECL_CHAIN in reverse order and COUNT is the number of
   those decls.  */
void
cp_finish_decomp (tree decl, tree first, unsigned int count)
{
  if (error_operand_p (decl))
    {
      /* Shared failure path: poison the individual binding decls and,
	 for namespace-scope bindings, give DECL a placeholder
	 assembler name.  Reached by goto from the checks below.  */
    error_out:
      while (count--)
	{
	  TREE_TYPE (first) = error_mark_node;
	  if (DECL_HAS_VALUE_EXPR_P (first))
	    {
	      SET_DECL_VALUE_EXPR (first, NULL_TREE);
	      DECL_HAS_VALUE_EXPR_P (first) = 0;
	    }
	  first = DECL_CHAIN (first);
	}
      if (DECL_P (decl) && DECL_NAMESPACE_SCOPE_P (decl))
	SET_DECL_ASSEMBLER_NAME (decl, get_identifier ("<decomp>"));
      return;
    }

  location_t loc = DECL_SOURCE_LOCATION (decl);
  if (type_dependent_expression_p (decl)
      /* This happens for range for when not in templates.
	 Still add the DECL_VALUE_EXPRs for later processing.  */
      || (!processing_template_decl
	  && type_uses_auto (TREE_TYPE (decl))))
    {
      for (unsigned int i = 0; i < count; i++)
	{
	  if (!DECL_HAS_VALUE_EXPR_P (first))
	    {
	      tree v = build_nt (ARRAY_REF, decl,
				 size_int (count - i - 1),
				 NULL_TREE, NULL_TREE);
	      SET_DECL_VALUE_EXPR (first, v);
	      DECL_HAS_VALUE_EXPR_P (first) = 1;
	    }
	  if (processing_template_decl)
	    fit_decomposition_lang_decl (first, decl);
	  first = DECL_CHAIN (first);
	}
      return;
    }

  /* FIRST chains the bindings in reverse; fill V back to front so V[0]
     is the first-declared binding.  */
  auto_vec<tree, 16> v;
  v.safe_grow (count);
  tree d = first;
  for (unsigned int i = 0; i < count; i++, d = DECL_CHAIN (d))
    {
      v[count - i - 1] = d;
      fit_decomposition_lang_decl (d, decl);
    }

  tree type = TREE_TYPE (decl);
  tree dexp = decl;

  if (TYPE_REF_P (type))
    {
      dexp = convert_from_reference (dexp);
      type = complete_type (TREE_TYPE (type));
      if (type == error_mark_node)
	goto error_out;
      if (!COMPLETE_TYPE_P (type))
	{
	  error_at (loc, "structured binding refers to incomplete type %qT",
		    type);
	  goto error_out;
	}
    }

  tree eltype = NULL_TREE;
  unsigned HOST_WIDE_INT eltscnt = 0;
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      tree nelts;
      nelts = array_type_nelts_top (type);
      if (nelts == error_mark_node)
	goto error_out;
      if (!tree_fits_uhwi_p (nelts))
	{
	  error_at (loc, "cannot decompose variable length array %qT", type);
	  goto error_out;
	}
      eltscnt = tree_to_uhwi (nelts);
      if (count != eltscnt)
	{
	  /* Shared diagnostic for a name count / element count mismatch;
	     the other cases below jump here.  */
	cnt_mismatch:
	  if (count > eltscnt)
	    error_n (loc, count,
		     "%u name provided for structured binding",
		     "%u names provided for structured binding", count);
	  else
	    error_n (loc, count,
		     "only %u name provided for structured binding",
		     "only %u names provided for structured binding", count);
	  inform_n (loc, eltscnt,
		    "while %qT decomposes into %wu element",
		    "while %qT decomposes into %wu elements",
		    type, eltscnt);
	  goto error_out;
	}
      eltype = TREE_TYPE (type);
      for (unsigned int i = 0; i < count; i++)
	{
	  TREE_TYPE (v[i]) = eltype;
	  layout_decl (v[i], 0);
	  if (processing_template_decl)
	    continue;
	  tree t = unshare_expr (dexp);
	  t = build4_loc (DECL_SOURCE_LOCATION (v[i]), ARRAY_REF,
			  eltype, t, size_int (i), NULL_TREE,
			  NULL_TREE);
	  SET_DECL_VALUE_EXPR (v[i], t);
	  DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	}
    }
  /* 2 GNU extensions.  */
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      eltscnt = 2;
      if (count != eltscnt)
	goto cnt_mismatch;
      eltype = cp_build_qualified_type (TREE_TYPE (type), TYPE_QUALS (type));
      for (unsigned int i = 0; i < count; i++)
	{
	  TREE_TYPE (v[i]) = eltype;
	  layout_decl (v[i], 0);
	  if (processing_template_decl)
	    continue;
	  tree t = unshare_expr (dexp);
	  t = build1_loc (DECL_SOURCE_LOCATION (v[i]),
			  i ? IMAGPART_EXPR : REALPART_EXPR, eltype,
			  t);
	  SET_DECL_VALUE_EXPR (v[i], t);
	  DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	}
    }
  else if (TREE_CODE (type) == VECTOR_TYPE)
    {
      if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&eltscnt))
	{
	  error_at (loc, "cannot decompose variable length vector %qT", type);
	  goto error_out;
	}
      if (count != eltscnt)
	goto cnt_mismatch;
      eltype = cp_build_qualified_type (TREE_TYPE (type), TYPE_QUALS (type));
      for (unsigned int i = 0; i < count; i++)
	{
	  TREE_TYPE (v[i]) = eltype;
	  layout_decl (v[i], 0);
	  if (processing_template_decl)
	    continue;
	  tree t = unshare_expr (dexp);
	  convert_vector_to_array_for_subscript (DECL_SOURCE_LOCATION (v[i]),
						 &t, size_int (i));
	  t = build4_loc (DECL_SOURCE_LOCATION (v[i]), ARRAY_REF,
			  eltype, t, size_int (i), NULL_TREE,
			  NULL_TREE);
	  SET_DECL_VALUE_EXPR (v[i], t);
	  DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	}
    }
  /* Tuple-like case: std::tuple_size<TYPE> is a complete type.  */
  else if (tree tsize = get_tuple_size (type))
    {
      if (tsize == error_mark_node)
	{
	  error_at (loc, "%<std::tuple_size<%T>::value%> is not an integral "
			 "constant expression", type);
	  goto error_out;
	}
      if (!tree_fits_uhwi_p (tsize))
	{
	  error_n (loc, count,
		   "%u name provided for structured binding",
		   "%u names provided for structured binding", count);
	  inform (loc, "while %qT decomposes into %E elements",
		  type, tsize);
	  goto error_out;
	}
      eltscnt = tree_to_uhwi (tsize);
      if (count != eltscnt)
	goto cnt_mismatch;
      int save_read = DECL_READ_P (decl);
      for (unsigned i = 0; i < count; ++i)
	{
	  location_t sloc = input_location;
	  location_t dloc = DECL_SOURCE_LOCATION (v[i]);

	  input_location = dloc;
	  tree init = get_tuple_decomp_init (decl, i);
	  tree eltype = (init == error_mark_node ? error_mark_node
			 : get_tuple_element_type (type, i));
	  input_location = sloc;

	  if (VOID_TYPE_P (eltype))
	    {
	      error ("%<std::tuple_element<%u, %T>::type%> is %<void%>",
		     i, type);
	      eltype = error_mark_node;
	    }
	  if (init == error_mark_node || eltype == error_mark_node)
	    {
	      inform (dloc, "in initialization of structured binding "
		      "variable %qD", v[i]);
	      goto error_out;
	    }
	  /* Save the decltype away before reference collapse.  */
	  hash_map_safe_put<hm_ggc> (decomp_type_table, v[i], eltype);
	  eltype = cp_build_reference_type (eltype, !lvalue_p (init));
	  TREE_TYPE (v[i]) = eltype;
	  layout_decl (v[i], 0);
	  if (DECL_HAS_VALUE_EXPR_P (v[i]))
	    {
	      /* In this case the names are variables, not just proxies.  */
	      SET_DECL_VALUE_EXPR (v[i], NULL_TREE);
	      DECL_HAS_VALUE_EXPR_P (v[i]) = 0;
	    }
	  if (!processing_template_decl)
	    {
	      copy_linkage (v[i], decl);
	      cp_finish_decl (v[i], init, /*constexpr*/false,
			      /*asm*/NULL_TREE, LOOKUP_NORMAL);
	    }
	}
      /* Ignore reads from the underlying decl performed during initialization
	 of the individual variables.  If those will be read, we'll mark
	 the underlying decl as read at that point.  */
      DECL_READ_P (decl) = save_read;
    }
  else if (TREE_CODE (type) == UNION_TYPE)
    {
      error_at (loc, "cannot decompose union type %qT", type);
      goto error_out;
    }
  else if (!CLASS_TYPE_P (type))
    {
      error_at (loc, "cannot decompose non-array non-class type %qT", type);
      goto error_out;
    }
  else if (LAMBDA_TYPE_P (type))
    {
      error_at (loc, "cannot decompose lambda closure type %qT", type);
      goto error_out;
    }
  else if (processing_template_decl && complete_type (type) == error_mark_node)
    goto error_out;
  else if (processing_template_decl && !COMPLETE_TYPE_P (type))
    pedwarn (loc, 0, "structured binding refers to incomplete class type %qT",
	     type);
  else
    {
      /* Plain class case: bind to the non-static data members of the
	 unique class in the hierarchy that has any.  */
      tree btype = find_decomp_class_base (loc, type, NULL_TREE);
      if (btype == error_mark_node)
	goto error_out;
      else if (btype == NULL_TREE)
	{
	  error_at (loc, "cannot decompose class type %qT without non-static "
			 "data members", type);
	  goto error_out;
	}
      for (tree field = TYPE_FIELDS (btype); field; field = TREE_CHAIN (field))
	if (TREE_CODE (field) != FIELD_DECL
	    || DECL_ARTIFICIAL (field)
	    || DECL_UNNAMED_BIT_FIELD (field))
	  continue;
	else
	  eltscnt++;
      if (count != eltscnt)
	goto cnt_mismatch;
      tree t = dexp;
      if (type != btype)
	{
	  t = convert_to_base (t, btype, /*check_access*/true,
			       /*nonnull*/false, tf_warning_or_error);
	  type = btype;
	}
      unsigned int i = 0;
      for (tree field = TYPE_FIELDS (btype); field; field = TREE_CHAIN (field))
	if (TREE_CODE (field) != FIELD_DECL
	    || DECL_ARTIFICIAL (field)
	    || DECL_UNNAMED_BIT_FIELD (field))
	  continue;
	else
	  {
	    tree tt = finish_non_static_data_member (field, unshare_expr (t),
						     NULL_TREE);
	    if (REFERENCE_REF_P (tt))
	      tt = TREE_OPERAND (tt, 0);
	    TREE_TYPE (v[i]) = TREE_TYPE (tt);
	    layout_decl (v[i], 0);
	    if (!processing_template_decl)
	      {
		SET_DECL_VALUE_EXPR (v[i], tt);
		DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	      }
	    i++;
	  }
    }
  if (processing_template_decl)
    {
      for (unsigned int i = 0; i < count; i++)
	if (!DECL_HAS_VALUE_EXPR_P (v[i]))
	  {
	    tree a = build_nt (ARRAY_REF, decl, size_int (i),
			       NULL_TREE, NULL_TREE);
	    SET_DECL_VALUE_EXPR (v[i], a);
	    DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	  }
    }
}

/* Returns a declaration for a VAR_DECL as if:

     extern "C" TYPE NAME;

   had been seen.  Used to create compiler-generated global
   variables.  */

static tree
declare_global_var (tree name, tree type)
{
  tree decl;

  push_to_top_level ();
  decl = build_decl (input_location, VAR_DECL, name, type);
  TREE_PUBLIC (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_CONTEXT (decl) = FROB_CONTEXT (global_namespace);
  /* If the user has explicitly declared this variable (perhaps
     because the code we are compiling is part of a low-level runtime
     library), then it is possible that our declaration will be merged
     with theirs by pushdecl.  */
  decl = pushdecl (decl);
  cp_finish_decl (decl, NULL_TREE, false, NULL_TREE, 0);
  pop_from_top_level ();

  return decl;
}

/* Returns the type for the argument to "__cxa_atexit" (or "atexit",
   if "__cxa_atexit" is not being used) corresponding to the function
   to be called when the program exits.  */

static tree
get_atexit_fn_ptr_type (void)
{
  tree fn_type;

  /* Build and cache the type on first use.  */
  if (!atexit_fn_ptr_type_node)
    {
      tree arg_type;
      if (flag_use_cxa_atexit
	  && !targetm.cxx.use_atexit_for_cxa_atexit ())
	/* The parameter to "__cxa_atexit" is "void (*)(void *)".  */
	arg_type = ptr_type_node;
      else
	/* The parameter to "atexit" is "void (*)(void)".  */
	arg_type = NULL_TREE;

      fn_type = build_function_type_list (void_type_node,
					  arg_type, NULL_TREE);
      atexit_fn_ptr_type_node = build_pointer_type (fn_type);
    }

  return atexit_fn_ptr_type_node;
}

/* Returns a pointer to the `atexit' function.  Note that if
   FLAG_USE_CXA_ATEXIT is nonzero, then this will actually be the new
   `__cxa_atexit' function specified in the IA64 C++ ABI.
*/ static tree get_atexit_node (void) { tree atexit_fndecl; tree fn_type; tree fn_ptr_type; const char *name; bool use_aeabi_atexit; if (atexit_node) return atexit_node; if (flag_use_cxa_atexit && !targetm.cxx.use_atexit_for_cxa_atexit ()) { /* The declaration for `__cxa_atexit' is: int __cxa_atexit (void (*)(void *), void *, void *) We build up the argument types and then the function type itself. */ tree argtype0, argtype1, argtype2; use_aeabi_atexit = targetm.cxx.use_aeabi_atexit (); /* First, build the pointer-to-function type for the first argument. */ fn_ptr_type = get_atexit_fn_ptr_type (); /* Then, build the rest of the argument types. */ argtype2 = ptr_type_node; if (use_aeabi_atexit) { argtype1 = fn_ptr_type; argtype0 = ptr_type_node; } else { argtype1 = ptr_type_node; argtype0 = fn_ptr_type; } /* And the final __cxa_atexit type. */ fn_type = build_function_type_list (integer_type_node, argtype0, argtype1, argtype2, NULL_TREE); if (use_aeabi_atexit) name = "__aeabi_atexit"; else name = "__cxa_atexit"; } else { /* The declaration for `atexit' is: int atexit (void (*)()); We build up the argument types and then the function type itself. */ fn_ptr_type = get_atexit_fn_ptr_type (); /* Build the final atexit type. */ fn_type = build_function_type_list (integer_type_node, fn_ptr_type, NULL_TREE); name = "atexit"; } /* Now, build the function declaration. */ push_lang_context (lang_name_c); atexit_fndecl = build_library_fn_ptr (name, fn_type, ECF_LEAF | ECF_NOTHROW); mark_used (atexit_fndecl); pop_lang_context (); atexit_node = decay_conversion (atexit_fndecl, tf_warning_or_error); return atexit_node; } /* Like get_atexit_node, but for thread-local cleanups. 
*/

static tree
get_thread_atexit_node (void)
{
  /* The declaration for `__cxa_thread_atexit' is:

     int __cxa_thread_atexit (void (*)(void *), void *, void *) */
  tree fn_type = build_function_type_list (integer_type_node,
					   get_atexit_fn_ptr_type (),
					   ptr_type_node, ptr_type_node,
					   NULL_TREE);

  /* Now, build the function declaration.  */
  tree atexit_fndecl = build_library_fn_ptr ("__cxa_thread_atexit", fn_type,
					     ECF_LEAF | ECF_NOTHROW);
  return decay_conversion (atexit_fndecl, tf_warning_or_error);
}

/* Returns the __dso_handle VAR_DECL.  The result is cached in
   dso_handle_node.  */

static tree
get_dso_handle_node (void)
{
  if (dso_handle_node)
    return dso_handle_node;

  /* Declare the variable.  */
  dso_handle_node = declare_global_var (get_identifier ("__dso_handle"),
					ptr_type_node);

#ifdef HAVE_GAS_HIDDEN
  if (dso_handle_node != error_mark_node)
    {
      DECL_VISIBILITY (dso_handle_node) = VISIBILITY_HIDDEN;
      DECL_VISIBILITY_SPECIFIED (dso_handle_node) = 1;
    }
#endif

  return dso_handle_node;
}

/* Begin a new function with internal linkage whose job will be simply
   to destroy some particular variable.  */

/* Counter used to give each generated cleanup function a unique
   "__tcf_N" name.  */
static GTY(()) int start_cleanup_cnt;

static tree
start_cleanup_fn (void)
{
  char name[32];
  tree fntype;
  tree fndecl;
  bool use_cxa_atexit = flag_use_cxa_atexit
			&& !targetm.cxx.use_atexit_for_cxa_atexit ();

  push_to_top_level ();

  /* No need to mangle this.  */
  push_lang_context (lang_name_c);

  /* Build the name of the function.  */
  sprintf (name, "__tcf_%d", start_cleanup_cnt++);
  /* Build the function declaration.  */
  fntype = TREE_TYPE (get_atexit_fn_ptr_type ());
  fndecl = build_lang_decl (FUNCTION_DECL, get_identifier (name), fntype);
  /* It's a function with internal linkage, generated by the
     compiler.  */
  TREE_PUBLIC (fndecl) = 0;
  DECL_ARTIFICIAL (fndecl) = 1;
  /* Make the function `inline' so that it is only emitted if it is
     actually needed.  It is unlikely that it will be inlined, since
     it is only called via a function pointer, but we avoid unnecessary
     emissions this way.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;
  DECL_INTERFACE_KNOWN (fndecl) = 1;
  /* Build the parameter.  Only the __cxa_atexit flavour passes an
     argument ("void *"); plain atexit takes none.  */
  if (use_cxa_atexit)
    {
      tree parmdecl = cp_build_parm_decl (fndecl, NULL_TREE,
					  ptr_type_node);
      TREE_USED (parmdecl) = 1;
      DECL_READ_P (parmdecl) = 1;
      DECL_ARGUMENTS (fndecl) = parmdecl;
    }

  pushdecl (fndecl);
  start_preparsed_function (fndecl, NULL_TREE, SF_PRE_PARSED);

  pop_lang_context ();

  return current_function_decl;
}

/* Finish the cleanup function begun by start_cleanup_fn.  Emits (or
   defers) the function and restores the saved scope.  */

static void
end_cleanup_fn (void)
{
  expand_or_defer_fn (finish_function (/*inline_p=*/false));

  pop_from_top_level ();
}

/* Generate code to handle the destruction of DECL, an object with
   static storage duration.  Returns the call registering the
   destructor with the appropriate runtime entry point
   (__cxa_atexit, __cxa_thread_atexit, __aeabi_atexit, or atexit),
   or void_node when no registration is needed.  */

tree
register_dtor_fn (tree decl)
{
  tree cleanup;
  tree addr;
  tree compound_stmt;
  tree fcall;
  tree type;
  bool ob_parm, dso_parm, use_dtor;
  tree arg0, arg1, arg2;
  tree atex_node;

  type = TREE_TYPE (decl);
  if (TYPE_HAS_TRIVIAL_DESTRUCTOR (type))
    return void_node;

  if (decl_maybe_constant_destruction (decl, type)
      && DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
    {
      cxx_maybe_build_cleanup (decl, tf_warning_or_error);
      return void_node;
    }

  /* If we're using "__cxa_atexit" (or "__cxa_thread_atexit" or
     "__aeabi_atexit"), and DECL is a class object, we can just pass the
     destructor to "__cxa_atexit"; we don't have to build a temporary
     function to do the cleanup.  */
  dso_parm = (flag_use_cxa_atexit
	      && !targetm.cxx.use_atexit_for_cxa_atexit ());
  ob_parm = (CP_DECL_THREAD_LOCAL_P (decl) || dso_parm);
  use_dtor = ob_parm && CLASS_TYPE_P (type);
  if (use_dtor)
    {
      cleanup = get_class_binding (type, complete_dtor_identifier);

      /* Make sure it is accessible.  */
      perform_or_defer_access_check (TYPE_BINFO (type), cleanup, cleanup,
				     tf_warning_or_error);
    }
  else
    {
      /* Call build_cleanup before we enter the anonymous function so
	 that any access checks will be done relative to the current
	 scope, rather than the scope of the anonymous function.  */
      build_cleanup (decl);

      /* Now start the function.  */
      cleanup = start_cleanup_fn ();

      /* Now, recompute the cleanup.  It may contain SAVE_EXPRs that refer
	 to the original function, rather than the anonymous one.  That
	 will make the back end think that nested functions are in use,
	 which causes confusion.  */
      push_deferring_access_checks (dk_no_check);
      fcall = build_cleanup (decl);
      pop_deferring_access_checks ();

      /* Create the body of the anonymous function.  */
      compound_stmt = begin_compound_stmt (BCS_FN_BODY);
      finish_expr_stmt (fcall);
      finish_compound_stmt (compound_stmt);
      end_cleanup_fn ();
    }

  /* Call atexit with the cleanup function.  */
  mark_used (cleanup);
  cleanup = build_address (cleanup);

  if (CP_DECL_THREAD_LOCAL_P (decl))
    atex_node = get_thread_atexit_node ();
  else
    atex_node = get_atexit_node ();

  if (use_dtor)
    {
      /* We must convert CLEANUP to the type that "__cxa_atexit"
	 expects.  */
      cleanup = build_nop (get_atexit_fn_ptr_type (), cleanup);

      /* "__cxa_atexit" will pass the address of DECL to the
	 cleanup function.  */
      mark_used (decl);
      addr = build_address (decl);

      /* The declared type of the parameter to "__cxa_atexit" is
	 "void *".  For plain "T*", we could just let the
	 machinery in cp_build_function_call convert it -- but if the
	 type is "cv-qualified T *", then we need to convert it before
	 passing it in, to avoid spurious errors.  */
      addr = build_nop (ptr_type_node, addr);
    }
  else
    /* Since the cleanup functions we build ignore the address
       they're given, there's no reason to pass the actual address
       in, and, in general, it's cheaper to pass NULL than any
       other value.  */
    addr = null_pointer_node;

  if (dso_parm)
    arg2 = cp_build_addr_expr (get_dso_handle_node (),
			       tf_warning_or_error);
  else if (ob_parm)
    /* Just pass NULL to the dso handle parm if we don't actually
       have a DSO handle on this target.  */
    arg2 = null_pointer_node;
  else
    arg2 = NULL_TREE;

  if (ob_parm)
    {
      /* __aeabi_atexit takes (object, dtor, dso) -- the reverse of
	 __cxa_atexit's (dtor, object, dso).  */
      if (!CP_DECL_THREAD_LOCAL_P (decl)
	  && targetm.cxx.use_aeabi_atexit ())
	{
	  arg1 = cleanup;
	  arg0 = addr;
	}
      else
	{
	  arg1 = addr;
	  arg0 = cleanup;
	}
    }
  else
    {
      arg0 = cleanup;
      arg1 = NULL_TREE;
    }
  return cp_build_function_call_nary (atex_node, tf_warning_or_error,
				      arg0, arg1, arg2, NULL_TREE);
}

/* DECL is a VAR_DECL with static storage duration.  INIT, if present,
   is its initializer.  Generate code to handle the construction
   and destruction of DECL.  */

static void
expand_static_init (tree decl, tree init)
{
  gcc_assert (VAR_P (decl));
  gcc_assert (TREE_STATIC (decl));

  /* Some variables require no dynamic initialization.  */
  if (decl_maybe_constant_destruction (decl, TREE_TYPE (decl)))
    {
      /* Make sure the destructor is callable.  */
      cxx_maybe_build_cleanup (decl, tf_warning_or_error);
      if (!init)
	return;
    }

  if (CP_DECL_THREAD_LOCAL_P (decl) && DECL_GNU_TLS_P (decl)
      && !DECL_FUNCTION_SCOPE_P (decl))
    {
      location_t dloc = DECL_SOURCE_LOCATION (decl);
      if (init)
	error_at (dloc, "non-local variable %qD declared %<__thread%> "
		  "needs dynamic initialization", decl);
      else
	error_at (dloc, "non-local variable %qD declared %<__thread%> "
		  "has a non-trivial destructor", decl);
      static bool informed;
      if (!informed)
	{
	  inform (dloc, "C++11 %<thread_local%> allows dynamic "
		  "initialization and destruction");
	  informed = true;
	}
      return;
    }

  if (DECL_FUNCTION_SCOPE_P (decl))
    {
      /* Emit code to perform this initialization but once.  */
      tree if_stmt = NULL_TREE, inner_if_stmt = NULL_TREE;
      tree then_clause = NULL_TREE, inner_then_clause = NULL_TREE;
      tree guard, guard_addr;
      tree flag, begin;
      /* We don't need thread-safety code for thread-local vars.  */
      bool thread_guard = (flag_threadsafe_statics
			   && !CP_DECL_THREAD_LOCAL_P (decl));

      /* Emit code to perform this initialization but once.  This code
	 looks like:

	   static <type> guard;
	   if (!__atomic_load (guard.first_byte)) {
	     if (__cxa_guard_acquire (&guard)) {
	       bool flag = false;
	       try {
		 // Do initialization.
		 flag = true; __cxa_guard_release (&guard);
		 // Register variable for destruction at end of program.
	       } catch {
		 if (!flag) __cxa_guard_abort (&guard);
	       }
	     }
	   }

	 Note that the `flag' variable is only set to 1 *after* the
	 initialization is complete.  This ensures that an exception,
	 thrown during the construction, will cause the variable to
	 reinitialized when we pass through this code again, as per:

	   [stmt.dcl]

	   If the initialization exits by throwing an exception, the
	   initialization is not complete, so it will be tried again
	   the next time control enters the declaration.

	 This process should be thread-safe, too; multiple threads
	 should not be able to initialize the variable more than
	 once.  */

      /* Create the guard variable.  */
      guard = get_guard (decl);

      /* Begin the conditional initialization.  */
      if_stmt = begin_if_stmt ();
      finish_if_stmt_cond (get_guard_cond (guard, thread_guard), if_stmt);
      then_clause = begin_compound_stmt (BCS_NO_SCOPE);

      if (thread_guard)
	{
	  tree vfntype = NULL_TREE;
	  tree acquire_name, release_name, abort_name;
	  tree acquire_fn, release_fn, abort_fn;
	  guard_addr = build_address (guard);

	  acquire_name = get_identifier ("__cxa_guard_acquire");
	  release_name = get_identifier ("__cxa_guard_release");
	  abort_name = get_identifier ("__cxa_guard_abort");
	  acquire_fn = get_global_binding (acquire_name);
	  release_fn = get_global_binding (release_name);
	  abort_fn = get_global_binding (abort_name);
	  /* Declare the runtime guard functions if the user (or a
	     header) hasn't already.  */
	  if (!acquire_fn)
	    acquire_fn = push_library_fn
	      (acquire_name, build_function_type_list (integer_type_node,
						       TREE_TYPE (guard_addr),
						       NULL_TREE),
	       NULL_TREE, ECF_NOTHROW);
	  if (!release_fn || !abort_fn)
	    vfntype = build_function_type_list (void_type_node,
						TREE_TYPE (guard_addr),
						NULL_TREE);
	  if (!release_fn)
	    release_fn = push_library_fn (release_name, vfntype,
					  NULL_TREE, ECF_NOTHROW);
	  if (!abort_fn)
	    abort_fn = push_library_fn (abort_name, vfntype, NULL_TREE,
					ECF_NOTHROW | ECF_LEAF);

	  inner_if_stmt = begin_if_stmt ();
	  finish_if_stmt_cond (build_call_n (acquire_fn, 1, guard_addr),
			       inner_if_stmt);

	  inner_then_clause = begin_compound_stmt (BCS_NO_SCOPE);
	  begin = get_target_expr (boolean_false_node);
	  flag = TARGET_EXPR_SLOT (begin);

	  /* On an exception path (CLEANUP_EH_ONLY) where FLAG is still
	     false, call __cxa_guard_abort to undo the acquire.  */
	  TARGET_EXPR_CLEANUP (begin)
	    = build3 (COND_EXPR, void_type_node, flag,
		      void_node,
		      build_call_n (abort_fn, 1, guard_addr));
	  CLEANUP_EH_ONLY (begin) = 1;

	  /* Do the initialization itself.  */
	  init = add_stmt_to_compound (begin, init);
	  init = add_stmt_to_compound (init,
				       build2 (MODIFY_EXPR, void_type_node,
					       flag, boolean_true_node));

	  /* Use atexit to register a function for destroying this static
	     variable.  Do this before calling __cxa_guard_release.  */
	  init = add_stmt_to_compound (init, register_dtor_fn (decl));

	  init = add_stmt_to_compound (init, build_call_n (release_fn, 1,
							   guard_addr));
	}
      else
	{
	  init = add_stmt_to_compound (init, set_guard (guard));

	  /* Use atexit to register a function for destroying this static
	     variable.  */
	  init = add_stmt_to_compound (init, register_dtor_fn (decl));
	}

      finish_expr_stmt (init);

      if (thread_guard)
	{
	  finish_compound_stmt (inner_then_clause);
	  finish_then_clause (inner_if_stmt);
	  finish_if_stmt (inner_if_stmt);
	}

      finish_compound_stmt (then_clause);
      finish_then_clause (if_stmt);
      finish_if_stmt (if_stmt);
    }
  else if (CP_DECL_THREAD_LOCAL_P (decl))
    tls_aggregates = tree_cons (init, decl, tls_aggregates);
  else
    static_aggregates = tree_cons (init, decl, static_aggregates);
}

/* Make TYPE a complete type based on INITIAL_VALUE.
   Return 0 if successful, 1 if INITIAL_VALUE can't be deciphered,
   2 if there was no information (in which case assume 0 if DO_DEFAULT),
   3 if the initializer list is empty (in pedantic mode). */

int
cp_complete_array_type (tree *ptype, tree initial_value, bool do_default)
{
  int failure;
  tree type, elt_type;

  /* Don't get confused by a CONSTRUCTOR for some other type.  */
  if (initial_value && TREE_CODE (initial_value) == CONSTRUCTOR
      && !BRACE_ENCLOSED_INITIALIZER_P (initial_value)
      && TREE_CODE (TREE_TYPE (initial_value)) != ARRAY_TYPE)
    return 1;

  if (initial_value)
    {
      unsigned HOST_WIDE_INT i;
      tree value;

      /* An array of character type can be initialized from a
	 brace-enclosed string constant.

	 FIXME: this code is duplicated from reshape_init.  Probably
	 we should just call reshape_init here?  */
      if (char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (*ptype)))
	  && TREE_CODE (initial_value) == CONSTRUCTOR
	  && !vec_safe_is_empty (CONSTRUCTOR_ELTS (initial_value)))
	{
	  vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (initial_value);
	  tree value = (*v)[0].value;
	  STRIP_ANY_LOCATION_WRAPPER (value);

	  if (TREE_CODE (value) == STRING_CST
	      && v->length () == 1)
	    initial_value = value;
	}

      /* If any of the elements are parameter packs, we can't actually
	 complete this type now because the array size is dependent.  */
      if (TREE_CODE (initial_value) == CONSTRUCTOR)
	{
	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (initial_value),
				      i, value)
	    {
	      if (PACK_EXPANSION_P (value))
		return 0;
	    }
	}
    }

  failure = complete_array_type (ptype, initial_value, do_default);

  /* We can create the array before the element type is complete, which
     means that we didn't have these two bits set in the original type
     either.  In completing the type, we are expected to propagate these
     bits.  See also complete_type which does the same thing for arrays
     of fixed size.  */
  type = *ptype;
  if (type != error_mark_node && TYPE_DOMAIN (type))
    {
      elt_type = TREE_TYPE (type);
      TYPE_NEEDS_CONSTRUCTING (type) = TYPE_NEEDS_CONSTRUCTING (elt_type);
      TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
	= TYPE_HAS_NONTRIVIAL_DESTRUCTOR (elt_type);
    }

  return failure;
}

/* As above, but either give an error or reject zero-size arrays,
   depending on COMPLAIN.
*/

int
cp_complete_array_type_or_error (tree *ptype, tree initial_value,
				 bool do_default, tsubst_flags_t complain)
{
  int failure;
  bool sfinae = !(complain & tf_error);
  /* In SFINAE context we can't be lenient about zero-size arrays.  */
  if (sfinae)
    ++pedantic;
  failure = cp_complete_array_type (ptype, initial_value, do_default);
  if (sfinae)
    --pedantic;
  if (failure)
    {
      /* Failure codes are documented on cp_complete_array_type:
	 1 = undecipherable initializer, 2 = no size information,
	 3 = zero-size array (pedantic).  */
      if (sfinae)
	/* Not an error.  */;
      else if (failure == 1)
	error ("initializer fails to determine size of %qT", *ptype);
      else if (failure == 2)
	{
	  if (do_default)
	    error ("array size missing in %qT", *ptype);
	}
      else if (failure == 3)
	error ("zero-size array %qT", *ptype);
      *ptype = error_mark_node;
    }
  return failure;
}

/* Return zero if something is declared to be a member of type
   CTYPE when in the context of CUR_TYPE.  STRING is the error
   message to print in that case.  Otherwise, quietly return 1.  */

static int
member_function_or_else (tree ctype, tree cur_type, enum overload_flags flags)
{
  if (ctype && ctype != cur_type)
    {
      if (flags == DTOR_FLAG)
	error ("destructor for alien class %qT cannot be a member", ctype);
      else
	error ("constructor for alien class %qT cannot be a member", ctype);
      return 0;
    }
  return 1;
}

/* Subroutine of `grokdeclarator'.  */

/* Generate errors possibly applicable for a given set of specifiers.
   This is for ARM $7.1.2.  */

static void
bad_specifiers (tree object,
		enum bad_spec_place type,
		int virtualp,
		int quals,
		int inlinep,
		int friendp,
		int raises,
		const location_t* locations)
{
  switch (type)
    {
      case BSP_VAR:
	if (virtualp)
	  error_at (locations[ds_virtual],
		    "%qD declared as a %<virtual%> variable", object);
	if (quals)
	  error ("%<const%> and %<volatile%> function specifiers on "
		 "%qD invalid in variable declaration", object);
	break;
      case BSP_PARM:
	if (virtualp)
	  error_at (locations[ds_virtual],
		    "%qD declared as a %<virtual%> parameter", object);
	if (inlinep)
	  error_at (locations[ds_inline],
		    "%qD declared as an %<inline%> parameter", object);
	if (quals)
	  error ("%<const%> and %<volatile%> function specifiers on "
		 "%qD invalid in parameter declaration", object);
	break;
      case BSP_TYPE:
	if (virtualp)
	  error_at (locations[ds_virtual],
		    "%qD declared as a %<virtual%> type", object);
	if (inlinep)
	  error_at (locations[ds_inline],
		    "%qD declared as an %<inline%> type", object);
	if (quals)
	  error ("%<const%> and %<volatile%> function specifiers on "
		 "%qD invalid in type declaration", object);
	break;
      case BSP_FIELD:
	if (virtualp)
	  error_at (locations[ds_virtual],
		    "%qD declared as a %<virtual%> field", object);
	if (inlinep)
	  error_at (locations[ds_inline],
		    "%qD declared as an %<inline%> field", object);
	if (quals)
	  error ("%<const%> and %<volatile%> function specifiers on "
		 "%qD invalid in field declaration", object);
	break;
      default:
        gcc_unreachable();
    }
  if (friendp)
    error ("%q+D declared as a friend", object);
  if (raises && !flag_noexcept_type
      && (TREE_CODE (object) == TYPE_DECL
	  || (!TYPE_PTRFN_P (TREE_TYPE (object))
	      && !TYPE_REFFN_P (TREE_TYPE (object))
	      && !TYPE_PTRMEMFUNC_P (TREE_TYPE (object)))))
    error ("%q+D declared with an exception specification", object);
}

/* DECL is a member function or static data member and is presently
   being defined.  Check that the definition is taking place in a
   valid namespace.  */

static void
check_class_member_definition_namespace (tree decl)
{
  /* These checks only apply to member functions and static data
     members.  */
  gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
  /* We check for problems with specializations in pt.c in
     check_specialization_namespace, where we can issue better
     diagnostics.  */
  if (processing_specialization)
    return;
  /* We check this in check_explicit_instantiation_namespace.  */
  if (processing_explicit_instantiation)
    return;
  /* [class.mfct]

     A member function definition that appears outside of the
     class definition shall appear in a namespace scope enclosing
     the class definition.

     [class.static.data]

     The definition for a static data member shall appear in a
     namespace scope enclosing the member's class definition.  */
  if (!is_ancestor (current_namespace, DECL_CONTEXT (decl)))
    permerror (input_location,
	       "definition of %qD is not in namespace enclosing %qT",
	       decl, DECL_CONTEXT (decl));
}

/* Build a PARM_DECL for the "this" parameter of FN.  TYPE is the
   METHOD_TYPE for a non-static member function; QUALS are the
   cv-qualifiers that apply to the function.  */

tree
build_this_parm (tree fn, tree type, cp_cv_quals quals)
{
  tree this_type;
  tree qual_type;
  tree parm;
  cp_cv_quals this_quals;

  if (CLASS_TYPE_P (type))
    {
      this_type
	= cp_build_qualified_type (type, quals & ~TYPE_QUAL_RESTRICT);
      this_type = build_pointer_type (this_type);
    }
  else
    this_type = type_of_this_parm (type);
  /* The `this' parameter is implicitly `const'; it cannot be
     assigned to.  */
  this_quals = (quals & TYPE_QUAL_RESTRICT) | TYPE_QUAL_CONST;
  qual_type = cp_build_qualified_type (this_type, this_quals);
  parm = build_artificial_parm (fn, this_identifier, qual_type);
  cp_apply_type_quals_to_decl (this_quals, parm);
  return parm;
}

/* DECL is a static member function.  Complain if it was declared
   with function-cv-quals.  */

static void
check_static_quals (tree decl, cp_cv_quals quals)
{
  if (quals != TYPE_UNQUALIFIED)
    error ("static member function %q#D declared with type qualifiers",
	   decl);
}

// Check that FN takes no arguments and returns bool.
static void
check_concept_fn (tree fn)
{
  // A constraint is nullary.
  if (DECL_ARGUMENTS (fn))
    error_at (DECL_SOURCE_LOCATION (fn),
	      "concept %q#D declared with function parameters", fn);

  // The declared return type of the concept shall be bool, and
  // it shall not be deduced from it definition.
  tree type = TREE_TYPE (TREE_TYPE (fn));
  if (is_auto (type))
    error_at (DECL_SOURCE_LOCATION (fn),
	      "concept %q#D declared with a deduced return type", fn);
  else if (type != boolean_type_node)
    error_at (DECL_SOURCE_LOCATION (fn),
	      "concept %q#D with non-%<bool%> return type %qT", fn, type);
}

/* Helper function.  Replace the temporary this parameter injected
   during cp_finish_omp_declare_simd with the real this parameter.
   walk_tree callback: DATA is the real this PARM_DECL.  */

static tree
declare_simd_adjust_this (tree *tp, int *walk_subtrees, void *data)
{
  tree this_parm = (tree) data;
  if (TREE_CODE (*tp) == PARM_DECL
      && DECL_NAME (*tp) == this_identifier
      && *tp != this_parm)
    *tp = this_parm;
  else if (TYPE_P (*tp))
    *walk_subtrees = 0;
  return NULL_TREE;
}

/* CTYPE is class type, or null if non-class.
   TYPE is type this FUNCTION_DECL should have, either FUNCTION_TYPE
   or METHOD_TYPE.
   DECLARATOR is the function's name.
   PARMS is a chain of PARM_DECLs for the function.
   VIRTUALP is truthvalue of whether the function is virtual or not.
   FLAGS are to be passed through to `grokclassfn'.
   QUALS are qualifiers indicating whether the function is `const'
   or `volatile'.
   RAISES is a list of exceptions that this function can raise.
   CHECK is 1 if we must find this method in CTYPE, 0 if we should
   not look, and -1 if we should not call `grokclassfn' at all.

   SFK is the kind of special function (if any) for the new function.

   Returns `NULL_TREE' if something goes wrong, after issuing
   applicable error messages.
*/

/* NOTE: INLINEP is a bit-mask as used below:
     1 = `inline', 2 = `constexpr', 4 = `concept', 8 = `consteval'.  */

static tree
grokfndecl (tree ctype,
	    tree type,
	    tree declarator,
	    tree parms,
	    tree orig_declarator,
	    const cp_decl_specifier_seq *declspecs,
	    tree decl_reqs,
	    int virtualp,
	    enum overload_flags flags,
	    cp_cv_quals quals,
	    cp_ref_qualifier rqual,
	    tree raises,
	    int check,
	    int friendp,
	    int publicp,
	    int inlinep,
	    bool deletedp,
	    special_function_kind sfk,
	    bool funcdef_flag,
	    bool late_return_type_p,
	    int initialized,
	    int template_count,
	    tree in_namespace,
	    tree* attrlist,
	    location_t location)
{
  tree decl;
  /* Member functions declared with FUNCTION_TYPE (rather than
     METHOD_TYPE) are static.  */
  int staticp = ctype && TREE_CODE (type) == FUNCTION_TYPE;
  tree t;

  if (location == UNKNOWN_LOCATION)
    location = input_location;

  /* Was the concept specifier present?  */
  bool concept_p = inlinep & 4;

  /* Concept declarations must have a corresponding definition.  */
  if (concept_p && !funcdef_flag)
    {
      error_at (location, "concept %qD has no definition", declarator);
      return NULL_TREE;
    }

  type = build_cp_fntype_variant (type, rqual, raises, late_return_type_p);

  decl = build_lang_decl_loc (location, FUNCTION_DECL, declarator, type);

  /* Set the constraints on the declaration.  */
  if (flag_concepts)
    {
      tree tmpl_reqs = NULL_TREE;
      tree ctx = friendp ? current_class_type : ctype;
      bool memtmpl = (processing_template_decl > template_class_depth (ctx));
      if (memtmpl)
	tmpl_reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms);
      tree ci = build_constraints (tmpl_reqs, decl_reqs);
      if (concept_p && ci)
	{
	  error_at (location, "a function concept cannot be constrained");
	  ci = NULL_TREE;
	}
      /* C++20 CA378: Remove non-templated constrained functions.  */
      if (ci && !flag_concepts_ts
	  && (!processing_template_decl
	      || (friendp && !memtmpl && !initialized && !funcdef_flag)))
	{
	  error_at (location, "constraints on a non-templated function");
	  ci = NULL_TREE;
	}
      set_constraints (decl, ci);
    }

  if (TREE_CODE (type) == METHOD_TYPE)
    {
      tree parm = build_this_parm (decl, type, quals);
      DECL_CHAIN (parm) = parms;
      parms = parm;

      /* Allocate space to hold the vptr bit if needed.  */
      SET_DECL_ALIGN (decl, MINIMUM_METHOD_BOUNDARY);
    }

  DECL_ARGUMENTS (decl) = parms;
  for (t = parms; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = decl;

  /* Propagate volatile out from type to decl.  */
  if (TYPE_VOLATILE (type))
    TREE_THIS_VOLATILE (decl) = 1;

  /* Setup decl according to sfk.  */
  switch (sfk)
    {
    case sfk_constructor:
    case sfk_copy_constructor:
    case sfk_move_constructor:
      DECL_CXX_CONSTRUCTOR_P (decl) = 1;
      DECL_NAME (decl) = ctor_identifier;
      break;
    case sfk_destructor:
      DECL_CXX_DESTRUCTOR_P (decl) = 1;
      DECL_NAME (decl) = dtor_identifier;
      break;
    default:
      break;
    }

  if (friendp && TREE_CODE (orig_declarator) == TEMPLATE_ID_EXPR)
    {
      if (funcdef_flag)
	error_at (location,
		  "defining explicit specialization %qD in friend declaration",
		  orig_declarator);
      else
	{
	  tree fns = TREE_OPERAND (orig_declarator, 0);
	  tree args = TREE_OPERAND (orig_declarator, 1);

	  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
	    {
	      /* Something like `template <class T> friend void f<T>()'.  */
	      error_at (location,
			"invalid use of template-id %qD in declaration "
			"of primary template",
			orig_declarator);
	      return NULL_TREE;
	    }

	  /* A friend declaration of the form friend void f<>().  Record
	     the information in the TEMPLATE_ID_EXPR.  */
	  SET_DECL_IMPLICIT_INSTANTIATION (decl);

	  gcc_assert (identifier_p (fns) || OVL_P (fns));
	  DECL_TEMPLATE_INFO (decl) = build_template_info (fns, args);

	  for (t = TYPE_ARG_TYPES (TREE_TYPE (decl)); t; t = TREE_CHAIN (t))
	    if (TREE_PURPOSE (t)
		&& TREE_CODE (TREE_PURPOSE (t)) == DEFERRED_PARSE)
	      {
		error_at (defparse_location (TREE_PURPOSE (t)),
			  "default arguments are not allowed in declaration "
			  "of friend template specialization %qD",
			  decl);
		return NULL_TREE;
	      }

	  if (inlinep & 1)
	    {
	      error_at (declspecs->locations[ds_inline],
			"%<inline%> is not allowed in declaration of friend "
			"template specialization %qD",
			decl);
	      return NULL_TREE;
	    }
	}
    }

  /* C++17 11.3.6/4: "If a friend declaration specifies a default argument
     expression, that declaration shall be a definition..."  */
  if (friendp && !funcdef_flag)
    {
      for (tree t = FUNCTION_FIRST_USER_PARMTYPE (decl);
	   t && t != void_list_node; t = TREE_CHAIN (t))
	if (TREE_PURPOSE (t))
	  {
	    permerror (DECL_SOURCE_LOCATION (decl),
		       "friend declaration of %qD specifies default "
		       "arguments and isn%'t a definition", decl);
	    break;
	  }
    }

  /* FIXME: For now.  */
  if (virtualp && (inlinep & 8) != 0)
    {
      sorry_at (DECL_SOURCE_LOCATION (decl),
		"%<virtual%> %<consteval%> method %qD not supported yet",
		decl);
      inlinep &= ~8;
    }

  /* If this decl has namespace scope, set that up.  */
  if (in_namespace)
    set_decl_namespace (decl, in_namespace, friendp);
  else if (!ctype)
    DECL_CONTEXT (decl) = FROB_CONTEXT (current_decl_namespace ());

  /* `main' and builtins have implicit 'C' linkage.  */
  if (ctype == NULL_TREE
      && DECL_FILE_SCOPE_P (decl)
      && current_lang_name == lang_name_cplusplus
      && (MAIN_NAME_P (declarator)
	  || (IDENTIFIER_LENGTH (declarator) > 10
	      && IDENTIFIER_POINTER (declarator)[0] == '_'
	      && IDENTIFIER_POINTER (declarator)[1] == '_'
	      && strncmp (IDENTIFIER_POINTER (declarator)+2,
			  "builtin_", 8) == 0)
	  || (targetcm.cxx_implicit_extern_c
	      && (targetcm.cxx_implicit_extern_c
		  (IDENTIFIER_POINTER (declarator))))))
    SET_DECL_LANGUAGE (decl, lang_c);

  /* Should probably propagate const out from type to decl I bet (mrs).  */
  if (staticp)
    {
      DECL_STATIC_FUNCTION_P (decl) = 1;
      DECL_CONTEXT (decl) = ctype;
    }

  if (deletedp)
    DECL_DELETED_FN (decl) = 1;

  if (ctype)
    {
      DECL_CONTEXT (decl) = ctype;
      if (funcdef_flag)
	check_class_member_definition_namespace (decl);
    }

  if (ctype == NULL_TREE && DECL_MAIN_P (decl))
    {
      if (PROCESSING_REAL_TEMPLATE_DECL_P())
	error_at (location, "cannot declare %<::main%> to be a template");
      if (inlinep & 1)
	error_at (declspecs->locations[ds_inline],
		  "cannot declare %<::main%> to be inline");
      if (inlinep & 2)
	error_at (declspecs->locations[ds_constexpr],
		  "cannot declare %<::main%> to be %qs", "constexpr");
      if (inlinep & 8)
	error_at (declspecs->locations[ds_consteval],
		  "cannot declare %<::main%> to be %qs", "consteval");
      if (!publicp)
	error_at (location, "cannot declare %<::main%> to be static");
      inlinep = 0;
      publicp = 1;
    }

  /* Members of anonymous types and local classes have no linkage; make
     them internal.  If a typedef is made later, this will be changed.  */
  if (ctype && (!TREE_PUBLIC (TYPE_MAIN_DECL (ctype))
		|| decl_function_context (TYPE_MAIN_DECL (ctype))))
    publicp = 0;

  if (publicp && cxx_dialect == cxx98)
    {
      /* [basic.link]: A name with no linkage (notably, the name of a class
	 or enumeration declared in a local scope) shall not be used to
	 declare an entity with linkage.

	 DR 757 relaxes this restriction for C++0x.  */
      no_linkage_error (decl);
    }

  TREE_PUBLIC (decl) = publicp;
  if (! publicp)
    {
      DECL_INTERFACE_KNOWN (decl) = 1;
      DECL_NOT_REALLY_EXTERN (decl) = 1;
    }

  /* If the declaration was declared inline, mark it as such.  */
  if (inlinep)
    {
      DECL_DECLARED_INLINE_P (decl) = 1;
      if (publicp)
	DECL_COMDAT (decl) = 1;
    }
  if (inlinep & 2)
    DECL_DECLARED_CONSTEXPR_P (decl) = true;
  else if (inlinep & 8)
    {
      DECL_DECLARED_CONSTEXPR_P (decl) = true;
      SET_DECL_IMMEDIATE_FUNCTION_P (decl);
    }

  // If the concept declaration specifier was found, check
  // that the declaration satisfies the necessary requirements.
  if (concept_p)
    {
      DECL_DECLARED_CONCEPT_P (decl) = true;
      check_concept_fn (decl);
    }

  DECL_EXTERNAL (decl) = 1;
  if (TREE_CODE (type) == FUNCTION_TYPE)
    {
      if (quals || rqual)
	TREE_TYPE (decl) = apply_memfn_quals (TREE_TYPE (decl),
					      TYPE_UNQUALIFIED,
					      REF_QUAL_NONE);

      if (quals)
	{
	  error (ctype
		 ? G_("static member function %qD cannot have cv-qualifier")
		 : G_("non-member function %qD cannot have cv-qualifier"),
		 decl);
	  quals = TYPE_UNQUALIFIED;
	}

      if (rqual)
	{
	  error (ctype
		 ? G_("static member function %qD cannot have ref-qualifier")
		 : G_("non-member function %qD cannot have ref-qualifier"),
		 decl);
	  rqual = REF_QUAL_NONE;
	}
    }

  if (deduction_guide_p (decl))
    {
      if (!DECL_NAMESPACE_SCOPE_P (decl))
	{
	  error_at (location, "deduction guide %qD must be declared at "
		    "namespace scope", decl);
	  return NULL_TREE;
	}
      tree type = TREE_TYPE (DECL_NAME (decl));
      if (in_namespace == NULL_TREE
	  && CP_DECL_CONTEXT (decl) != CP_TYPE_CONTEXT (type))
	{
	  error_at (location, "deduction guide %qD must be declared in the "
		    "same scope as %qT", decl, type);
	  inform (location_of (type), "  declared here");
	  return NULL_TREE;
	}
      if (funcdef_flag)
	error_at (location,
		  "deduction guide %qD must not have a function body", decl);
    }
  else if (IDENTIFIER_ANY_OP_P (DECL_NAME (decl))
	   && !grok_op_properties (decl, /*complain=*/true))
    return NULL_TREE;
  else if (UDLIT_OPER_P (DECL_NAME (decl)))
    {
      bool long_long_unsigned_p;
      bool long_double_p;
      const char *suffix = NULL;
      /* [over.literal]/6: Literal operators shall not have C linkage.  */
      if (DECL_LANGUAGE (decl) == lang_c)
	{
	  error_at (location, "literal operator with C linkage");
	  maybe_show_extern_c_location ();
	  return NULL_TREE;
	}

      if (DECL_NAMESPACE_SCOPE_P (decl))
	{
	  if (!check_literal_operator_args (decl, &long_long_unsigned_p,
					    &long_double_p))
	    {
	      error_at (location, "%qD has invalid argument list", decl);
	      return NULL_TREE;
	    }

	  suffix = UDLIT_OP_SUFFIX (DECL_NAME (decl));
	  if (long_long_unsigned_p)
	    {
	      if (cpp_interpret_int_suffix (parse_in, suffix, strlen (suffix)))
		warning_at (location, 0, "integer suffix %qs"
			    " shadowed by implementation", suffix);
	    }
	  else if (long_double_p)
	    {
	      if (cpp_interpret_float_suffix (parse_in, suffix, strlen (suffix)))
		warning_at (location, 0, "floating-point suffix %qs"
			    " shadowed by implementation", suffix);
	    }
	  /* 17.6.3.3.5  */
	  if (suffix[0] != '_'
	      && !current_function_decl && !(friendp && !funcdef_flag))
	    warning_at (location, OPT_Wliteral_suffix,
			"literal operator suffixes not preceded by %<_%>"
			" are reserved for future standardization");
	}
      else
	{
	  error_at (location, "%qD must be a non-member function", decl);
	  return NULL_TREE;
	}
    }

  if (funcdef_flag)
    /* Make the init_value nonzero so pushdecl knows this is not
       tentative.  error_mark_node is replaced later with the BLOCK.  */
    DECL_INITIAL (decl) = error_mark_node;

  if (TYPE_NOTHROW_P (type) || nothrow_libfn_p (decl))
    TREE_NOTHROW (decl) = 1;

  if (flag_openmp || flag_openmp_simd)
    {
      /* Adjust "omp declare simd" attributes.  */
      tree ods = lookup_attribute ("omp declare simd", *attrlist);
      if (ods)
	{
	  tree attr;
	  for (attr = ods; attr;
	       attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr)))
	    {
	      if (TREE_CODE (type) == METHOD_TYPE)
		walk_tree (&TREE_VALUE (attr), declare_simd_adjust_this,
			   DECL_ARGUMENTS (decl), NULL);
	      if (TREE_VALUE (attr) != NULL_TREE)
		{
		  tree cl = TREE_VALUE (TREE_VALUE (attr));
		  cl = c_omp_declare_simd_clauses_to_numbers
						(DECL_ARGUMENTS (decl), cl);
		  if (cl)
		    TREE_VALUE (TREE_VALUE (attr)) = cl;
		  else
		    TREE_VALUE (attr) = NULL_TREE;
		}
	    }
	}
    }

  /* Caller will do the rest of this.  */
  if (check < 0)
    return decl;

  if (ctype != NULL_TREE)
    grokclassfn (ctype, decl, flags);

  /* 12.4/3  */
  if (cxx_dialect >= cxx11
      && DECL_DESTRUCTOR_P (decl)
      && !TYPE_BEING_DEFINED (DECL_CONTEXT (decl))
      && !processing_template_decl)
    deduce_noexcept_on_destructor (decl);

  decl = check_explicit_specialization (orig_declarator, decl,
					template_count,
					2 * funcdef_flag +
					4 * (friendp != 0) +
					8 * concept_p,
					*attrlist);
  if (decl == error_mark_node)
    return NULL_TREE;

  if (DECL_STATIC_FUNCTION_P (decl))
    check_static_quals (decl, quals);

  if (attrlist)
    {
      cplus_decl_attributes (&decl, *attrlist, 0);
      *attrlist = NULL_TREE;
    }

  /* Check main's type after attributes have been applied.  */
  if (ctype == NULL_TREE && DECL_MAIN_P (decl))
    {
      if (!same_type_p (TREE_TYPE (TREE_TYPE (decl)),
			integer_type_node))
	{
	  tree oldtypeargs = TYPE_ARG_TYPES (TREE_TYPE (decl));
	  tree newtype;
	  error_at (declspecs->locations[ds_type_spec],
		    "%<::main%> must return %<int%>");
	  newtype = build_function_type (integer_type_node, oldtypeargs);
	  TREE_TYPE (decl) = newtype;
	}
      if (warn_main)
	check_main_parameter_types (decl);
    }

  if (ctype != NULL_TREE && check)
    {
      tree old_decl = check_classfn (ctype, decl,
				     (processing_template_decl
				      > template_class_depth (ctype))
				     ? current_template_parms
				     : NULL_TREE);

      if (old_decl == error_mark_node)
	return NULL_TREE;

      if (old_decl)
	{
	  tree ok;
	  tree pushed_scope;

	  if (TREE_CODE (old_decl) == TEMPLATE_DECL)
	    /* Because grokfndecl is always supposed to return a
	       FUNCTION_DECL, we pull out the DECL_TEMPLATE_RESULT
	       here.  We depend on our callers to figure out that its
	       really a template that's being returned.  */
	    old_decl = DECL_TEMPLATE_RESULT (old_decl);

	  if (DECL_STATIC_FUNCTION_P (old_decl)
	      && TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
	    {
	      /* Remove the `this' parm added by grokclassfn.  */
	      revert_static_member_fn (decl);
	      check_static_quals (decl, quals);
	    }
	  if (DECL_ARTIFICIAL (old_decl))
	    {
	      error ("definition of implicitly-declared %qD", old_decl);
	      return NULL_TREE;
	    }
	  else if (DECL_DEFAULTED_FN (old_decl))
	    {
	      error ("definition of explicitly-defaulted %q+D", decl);
	      inform (DECL_SOURCE_LOCATION (old_decl),
		      "%q#D explicitly defaulted here", old_decl);
	      return NULL_TREE;
	    }

	  /* Since we've smashed OLD_DECL to its
	     DECL_TEMPLATE_RESULT, we must do the same to DECL.  */
	  if (TREE_CODE (decl) == TEMPLATE_DECL)
	    decl = DECL_TEMPLATE_RESULT (decl);

	  /* Attempt to merge the declarations.  This can fail, in
	     the case of some invalid specialization declarations.  */
	  pushed_scope = push_scope (ctype);
	  ok = duplicate_decls (decl, old_decl, friendp);
	  if (pushed_scope)
	    pop_scope (pushed_scope);
	  if (!ok)
	    {
	      error ("no %q#D member function declared in class %qT",
		     decl, ctype);
	      return NULL_TREE;
	    }
	  if (ok == error_mark_node)
	    return NULL_TREE;
	  return old_decl;
	}
    }

  if (DECL_CONSTRUCTOR_P (decl) && !grok_ctor_properties (ctype, decl))
    return NULL_TREE;

  if (ctype == NULL_TREE || check)
    return decl;

  if (virtualp)
    DECL_VIRTUAL_P (decl) = 1;

  return decl;
}

/* decl is a FUNCTION_DECL.
   specifiers are the parsed virt-specifiers.

   Set flags to reflect the virt-specifiers.

   Returns decl.
*/

static tree
set_virt_specifiers (tree decl, cp_virt_specifiers specifiers)
{
  if (decl == NULL_TREE)
    return decl;
  if (specifiers & VIRT_SPEC_OVERRIDE)
    DECL_OVERRIDE_P (decl) = 1;
  if (specifiers & VIRT_SPEC_FINAL)
    DECL_FINAL_P (decl) = 1;
  return decl;
}

/* DECL is a VAR_DECL for a static data member.  Set flags to reflect
   the linkage that DECL will receive in the object file.  */

static void
set_linkage_for_static_data_member (tree decl)
{
  /* A static data member always has static storage duration and
     external linkage.  Note that static data members are forbidden in
     local classes -- the only situation in which a class has
     non-external linkage.  */
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;
  /* For non-template classes, static data members are always put
     out in exactly those files where they are defined, just as
     with ordinary namespace-scope variables.  */
  if (!processing_template_decl)
    DECL_INTERFACE_KNOWN (decl) = 1;
}

/* Create a VAR_DECL named NAME with the indicated TYPE.

   If SCOPE is non-NULL, it is the class type or namespace containing
   the variable.  If SCOPE is NULL, the variable is created in the
   innermost enclosing scope.  */

static tree
grokvardecl (tree type,
	     tree name,
	     tree orig_declarator,
	     const cp_decl_specifier_seq *declspecs,
	     int initialized,
	     int type_quals,
	     int inlinep,
	     bool conceptp,
	     int template_count,
	     tree scope,
	     location_t location)
{
  tree decl;
  tree explicit_scope;

  gcc_assert (!name || identifier_p (name));

  bool constp = (type_quals & TYPE_QUAL_CONST) != 0;
  bool volatilep = (type_quals & TYPE_QUAL_VOLATILE) != 0;

  /* Compute the scope in which to place the variable, but remember
     whether or not that scope was explicitly specified by the user.  */
  explicit_scope = scope;
  if (!scope)
    {
      /* An explicit "extern" specifier indicates a namespace-scope
	 variable.  */
      if (declspecs->storage_class == sc_extern)
	scope = current_decl_namespace ();
      else if (!at_function_scope_p ())
	scope = current_scope ();
    }

  /* Decide whether the declaration needs DECL_LANG_SPECIFIC; use the
     cheaper plain build_decl when it does not.  */
  if (scope
      && (/* If the variable is a namespace-scope variable declared
	     in a template, we need DECL_LANG_SPECIFIC.  */
	  (TREE_CODE (scope) == NAMESPACE_DECL && processing_template_decl)
	  /* Similarly for namespace-scope variables with language linkage
	     other than C++.  */
	  || (TREE_CODE (scope) == NAMESPACE_DECL
	      && current_lang_name != lang_name_cplusplus)
	  /* Similarly for static data members.  */
	  || TYPE_P (scope)
	  /* Similarly for explicit specializations.  */
	  || (orig_declarator
	      && TREE_CODE (orig_declarator) == TEMPLATE_ID_EXPR)))
    decl = build_lang_decl_loc (location, VAR_DECL, name, type);
  else
    decl = build_decl (location, VAR_DECL, name, type);

  if (explicit_scope && TREE_CODE (explicit_scope) == NAMESPACE_DECL)
    set_decl_namespace (decl, explicit_scope, 0);
  else
    DECL_CONTEXT (decl) = FROB_CONTEXT (scope);

  if (declspecs->storage_class == sc_extern)
    {
      DECL_THIS_EXTERN (decl) = 1;
      DECL_EXTERNAL (decl) = !initialized;
    }

  if (DECL_CLASS_SCOPE_P (decl))
    {
      set_linkage_for_static_data_member (decl);
      /* This function is only called with out-of-class definitions.  */
      DECL_EXTERNAL (decl) = 0;
      check_class_member_definition_namespace (decl);
    }
  /* At top level, either `static' or no s.c. makes a definition
     (perhaps tentative), and absence of `static' makes it public.  */
  else if (toplevel_bindings_p ())
    {
      TREE_PUBLIC (decl) = (declspecs->storage_class != sc_static
			    && (DECL_THIS_EXTERN (decl)
				|| ! constp
				|| volatilep
				|| inlinep));
      TREE_STATIC (decl) = ! DECL_EXTERNAL (decl);
    }
  /* Not at top level, only `static' makes a static definition.  */
  else
    {
      TREE_STATIC (decl) = declspecs->storage_class == sc_static;
      TREE_PUBLIC (decl) = DECL_EXTERNAL (decl);
    }

  /* Handle __thread / thread_local.  Only static-storage entities can
     be thread-local.  */
  if (decl_spec_seq_has_spec_p (declspecs, ds_thread))
    {
      if (DECL_EXTERNAL (decl) || TREE_STATIC (decl))
	{
	  CP_DECL_THREAD_LOCAL_P (decl) = true;
	  if (!processing_template_decl)
	    set_decl_tls_model (decl, decl_default_tls_model (decl));
	}
      if (declspecs->gnu_thread_keyword_p)
	SET_DECL_GNU_TLS_P (decl);
    }

  /* If the type of the decl has no linkage, make sure that we'll
     notice that in mark_used.  */
  if (cxx_dialect > cxx98
      && decl_linkage (decl) != lk_none
      && DECL_LANG_SPECIFIC (decl) == NULL
      && !DECL_EXTERN_C_P (decl)
      && no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false))
    retrofit_lang_decl (decl);

  if (TREE_PUBLIC (decl))
    {
      /* [basic.link]: A name with no linkage (notably, the name of a
	 class or enumeration declared in a local scope) shall not be
	 used to declare an entity with linkage.

	 DR 757 relaxes this restriction for C++0x.  */
      if (cxx_dialect < cxx11)
	no_linkage_error (decl);
    }
  else
    DECL_INTERFACE_KNOWN (decl) = 1;

  if (DECL_NAME (decl)
      && MAIN_NAME_P (DECL_NAME (decl))
      && scope == global_namespace)
    error_at (DECL_SOURCE_LOCATION (decl),
	      "cannot declare %<::main%> to be a global variable");

  /* Check that the variable can be safely declared as a concept.
     Note that this also forbids explicit specializations.  */
  if (conceptp)
    {
      if (!processing_template_decl)
	{
	  error_at (declspecs->locations[ds_concept],
		    "a non-template variable cannot be %<concept%>");
	  return NULL_TREE;
	}
      else
	DECL_DECLARED_CONCEPT_P (decl) = true;
      if (!same_type_ignoring_top_level_qualifiers_p (type,
						      boolean_type_node))
	error_at (declspecs->locations[ds_type_spec],
		  "concept must have type %<bool%>");
      if (TEMPLATE_PARMS_CONSTRAINTS (current_template_parms))
	{
	  error_at (location, "a variable concept cannot be constrained");
	  TEMPLATE_PARMS_CONSTRAINTS (current_template_parms) = NULL_TREE;
	}
    }
  else if (flag_concepts
	   && processing_template_decl > template_class_depth (scope))
    {
      tree reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms);
      tree ci = build_constraints (reqs, NULL_TREE);
      set_constraints (decl, ci);
    }

  // Handle explicit specializations and instantiations of variable templates.
  if (orig_declarator)
    decl = check_explicit_specialization (orig_declarator, decl,
					  template_count, conceptp * 8);

  return decl != error_mark_node ? decl : NULL_TREE;
}

/* Create and return a canonical pointer to member function type, for
   TYPE, which is a POINTER_TYPE to a METHOD_TYPE.  */

tree
build_ptrmemfunc_type (tree type)
{
  tree field, fields;
  tree t;

  if (type == error_mark_node)
    return type;

  /* Make sure that we always have the unqualified pointer-to-member
     type first.  */
  if (cp_cv_quals quals = cp_type_quals (type))
    {
      tree unqual = build_ptrmemfunc_type (TYPE_MAIN_VARIANT (type));
      return cp_build_qualified_type (unqual, quals);
    }

  /* If a canonical type already exists for this type, use it.  We use
     this method instead of type_hash_canon, because it only does a
     simple equality check on the list of field members.  */

  t = TYPE_PTRMEMFUNC_TYPE (type);
  if (t)
    return t;

  t = make_node (RECORD_TYPE);

  /* Let the front end know this is a pointer to member function.  */
  TYPE_PTRMEMFUNC_FLAG (t) = 1;

  field = build_decl (input_location, FIELD_DECL, pfn_identifier, type);
  DECL_NONADDRESSABLE_P (field) = 1;
  fields = field;

  field = build_decl (input_location, FIELD_DECL, delta_identifier,
		      delta_type_node);
  DECL_NONADDRESSABLE_P (field) = 1;
  DECL_CHAIN (field) = fields;
  fields = field;

  finish_builtin_struct (t, "__ptrmemfunc_type", fields, ptr_type_node);

  /* Zap out the name so that the back end will give us the debugging
     information for this anonymous RECORD_TYPE.  */
  TYPE_NAME (t) = NULL_TREE;

  /* Cache this pointer-to-member type so that we can find it again
     later.  */
  TYPE_PTRMEMFUNC_TYPE (type) = t;

  if (TYPE_STRUCTURAL_EQUALITY_P (type))
    SET_TYPE_STRUCTURAL_EQUALITY (t);
  else if (TYPE_CANONICAL (type) != type)
    TYPE_CANONICAL (t) = build_ptrmemfunc_type (TYPE_CANONICAL (type));

  return t;
}

/* Create and return a pointer to data member type.  */

tree
build_ptrmem_type (tree class_type, tree member_type)
{
  if (TREE_CODE (member_type) == METHOD_TYPE)
    {
      cp_cv_quals quals = type_memfn_quals (member_type);
      cp_ref_qualifier rqual = type_memfn_rqual (member_type);
      member_type = build_memfn_type (member_type, class_type, quals, rqual);
      return build_ptrmemfunc_type (build_pointer_type (member_type));
    }
  else
    {
      gcc_assert (TREE_CODE (member_type) != FUNCTION_TYPE);
      return build_offset_type (class_type, member_type);
    }
}

/* DECL is a VAR_DECL defined in-class, whose TYPE is also given.
   Check to see that the definition is valid.  Issue appropriate error
   messages.  */

static void
check_static_variable_definition (tree decl, tree type)
{
  /* Avoid redundant diagnostics on out-of-class definitions.  */
  if (!current_class_type || !TYPE_BEING_DEFINED (current_class_type))
    ;
  /* Can't check yet if we don't know the type.  */
  else if (dependent_type_p (type))
    ;
  /* If DECL is declared constexpr, we'll do the appropriate checks
     in check_initializer.  Similarly for inline static data members.  */
  else if (DECL_P (decl)
	   && (DECL_DECLARED_CONSTEXPR_P (decl)
	       || undeduced_auto_decl (decl)
	       || DECL_VAR_DECLARED_INLINE_P (decl)))
    ;
  else if (cxx_dialect >= cxx11 && !INTEGRAL_OR_ENUMERATION_TYPE_P (type))
    {
      if (!COMPLETE_TYPE_P (type))
	error_at (DECL_SOURCE_LOCATION (decl),
		  "in-class initialization of static data member %q#D of "
		  "incomplete type", decl);
      else if (literal_type_p (type))
	permerror (DECL_SOURCE_LOCATION (decl),
		   "%<constexpr%> needed for in-class initialization of "
		   "static data member %q#D of non-integral type", decl);
      else
	error_at (DECL_SOURCE_LOCATION (decl),
		  "in-class initialization of static data member %q#D of "
		  "non-literal type", decl);
    }
  /* Motion 10 at San Diego: If a static const integral data member is
     initialized with an integral constant expression, the initializer
     may appear either in the declaration (within the class), or in
     the definition, but not both.  If it appears in the class, the
     member is a member constant.  The file-scope definition is always
     required.  */
  else if (!ARITHMETIC_TYPE_P (type) && TREE_CODE (type) != ENUMERAL_TYPE)
    error_at (DECL_SOURCE_LOCATION (decl),
	      "invalid in-class initialization of static data member "
	      "of non-integral type %qT", type);
  else if (!CP_TYPE_CONST_P (type))
    error_at (DECL_SOURCE_LOCATION (decl),
	      "ISO C++ forbids in-class initialization of non-const "
	      "static member %qD", decl);
  else if (!INTEGRAL_OR_ENUMERATION_TYPE_P (type))
    pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wpedantic,
	     "ISO C++ forbids initialization of member constant "
	     "%qD of non-integral type %qT", decl, type);
}

/* *expr_p is part of the TYPE_SIZE of a variably-sized array.  If any
   SAVE_EXPRs in *expr_p wrap expressions with side-effects, break those
   expressions out into temporary variables so that walk_tree doesn't
   step into them (c++/15764).
*/

static tree
stabilize_save_expr_r (tree *expr_p, int *walk_subtrees, void *data)
{
  hash_set<tree> *pset = (hash_set<tree> *)data;
  tree expr = *expr_p;
  if (TREE_CODE (expr) == SAVE_EXPR)
    {
      tree op = TREE_OPERAND (expr, 0);
      /* Recurse into the SAVE_EXPR operand first, then replace it with
	 a temporary if it still has side effects.  */
      cp_walk_tree (&op, stabilize_save_expr_r, data, pset);
      if (TREE_SIDE_EFFECTS (op))
	TREE_OPERAND (expr, 0) = get_temp_regvar (TREE_TYPE (op), op);
      *walk_subtrees = 0;
    }
  else if (!EXPR_P (expr) || !TREE_SIDE_EFFECTS (expr))
    *walk_subtrees = 0;
  return NULL;
}

/* Entry point for the above.  */

static void
stabilize_vla_size (tree size)
{
  hash_set<tree> pset;
  /* Break out any function calls into temporary variables.  */
  cp_walk_tree (&size, stabilize_save_expr_r, &pset, &pset);
}

/* Reduce a SIZEOF_EXPR to its value.  */

tree
fold_sizeof_expr (tree t)
{
  tree r;
  if (SIZEOF_EXPR_TYPE_P (t))
    r = cxx_sizeof_or_alignof_type (EXPR_LOCATION (t),
				    TREE_TYPE (TREE_OPERAND (t, 0)),
				    SIZEOF_EXPR, false, false);
  else if (TYPE_P (TREE_OPERAND (t, 0)))
    r = cxx_sizeof_or_alignof_type (EXPR_LOCATION (t), TREE_OPERAND (t, 0),
				    SIZEOF_EXPR, false, false);
  else
    r = cxx_sizeof_or_alignof_expr (EXPR_LOCATION (t), TREE_OPERAND (t, 0),
				    SIZEOF_EXPR, false);
  if (r == error_mark_node)
    r = size_one_node;
  return r;
}

/* Given the SIZE (i.e., number of elements) in an array, compute
   an appropriate index type for the array.  If non-NULL, NAME is
   the name of the entity being declared.  */

static tree
compute_array_index_type_loc (location_t name_loc, tree name, tree size,
			      tsubst_flags_t complain)
{
  if (error_operand_p (size))
    return error_mark_node;

  /* The type of the index being computed.  */
  tree itype;

  /* The original numeric size as seen in the source code before
     conversion to size_t.  */
  tree origsize = size;

  location_t loc = cp_expr_loc_or_loc (size, name ? name_loc
				       : input_location);

  if (!type_dependent_expression_p (size))
    {
      origsize = size = mark_rvalue_use (size);

      if (cxx_dialect < cxx11 && TREE_CODE (size) == NOP_EXPR
	  && TREE_SIDE_EFFECTS (size))
	/* In C++98, we mark a non-constant array bound with a magic
	   NOP_EXPR with TREE_SIDE_EFFECTS; don't fold in that case.  */;
      else
	{
	  size = build_converted_constant_expr (size_type_node, size,
						complain);
	  /* Pedantically a constant expression is required here and so
	     __builtin_is_constant_evaluated () should fold to true if it
	     is successfully folded into a constant.  */
	  size = fold_non_dependent_expr (size, complain,
					  /*manifestly_const_eval=*/true);

	  if (!TREE_CONSTANT (size))
	    size = origsize;
	}

      if (error_operand_p (size))
	return error_mark_node;

      /* The array bound must be an integer type.  */
      tree type = TREE_TYPE (size);
      if (!INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (type))
	{
	  if (!(complain & tf_error))
	    return error_mark_node;
	  if (name)
	    error_at (loc, "size of array %qD has non-integral type %qT",
		      name, type);
	  else
	    error_at (loc, "size of array has non-integral type %qT", type);
	  size = integer_one_node;
	}
    }

  /* A type is dependent if it is...an array type constructed from any
     dependent type or whose size is specified by a constant expression
     that is value-dependent.  */
  /* We can only call value_dependent_expression_p on integral constant
     expressions; treat non-constant expressions as dependent, too.  */
  if (processing_template_decl
      && (!TREE_CONSTANT (size) || value_dependent_expression_p (size)))
    {
      /* We cannot do any checking for a SIZE that isn't known to be
	 constant.  Just build the index type and mark that it requires
	 structural equality checks.  */
      itype = build_index_type (build_min (MINUS_EXPR, sizetype,
					   size, size_one_node));
      TYPE_DEPENDENT_P (itype) = 1;
      TYPE_DEPENDENT_P_VALID (itype) = 1;
      SET_TYPE_STRUCTURAL_EQUALITY (itype);
      return itype;
    }

  if (TREE_CODE (size) != INTEGER_CST)
    {
      tree folded = cp_fully_fold (size);
      if (TREE_CODE (folded) == INTEGER_CST)
	{
	  if (name)
	    pedwarn (loc, OPT_Wpedantic, "size of array %qD is not an "
		     "integral constant-expression", name);
	  else
	    pedwarn (loc, OPT_Wpedantic,
		     "size of array is not an integral constant-expression");
	}
      if (TREE_CONSTANT (size) && !TREE_CONSTANT (folded))
	/* We might have lost the TREE_CONSTANT flag e.g. when we are
	   folding a conversion from a pointer to integral type.  In that
	   case issue an error below and don't treat this as a VLA.  */;
      else
	/* Use the folded result for VLAs, too; it will have resolved
	   SIZEOF_EXPR.  */
	size = folded;
    }

  /* Normally, the array-bound will be a constant.  */
  if (TREE_CODE (size) == INTEGER_CST)
    {
      /* The size to use in diagnostics that reflects the constant
	 size used in the source, rather than SIZE massaged above.  */
      tree diagsize = size;

      /* If the original size before conversion to size_t was signed
	 and negative, convert it to ssizetype to restore the sign.  */
      if (!TYPE_UNSIGNED (TREE_TYPE (origsize))
	  && TREE_CODE (size) == INTEGER_CST
	  && tree_int_cst_sign_bit (size))
	{
	  diagsize = fold_convert (ssizetype, size);

	  /* Clear the overflow bit that may have been set as a result
	     of the conversion from the sizetype of the new size to
	     ssizetype.  */
	  TREE_OVERFLOW (diagsize) = false;
	}

      /* Verify that the array has a positive number of elements
	 and issue the appropriate diagnostic if it doesn't.  */
      if (!valid_array_size_p (loc, diagsize, name, (complain & tf_error)))
	{
	  if (!(complain & tf_error))
	    return error_mark_node;
	  size = integer_one_node;
	}
      /* As an extension we allow zero-sized arrays.  */
      else if (integer_zerop (size))
	{
	  if (!(complain & tf_error))
	    /* We must fail if performing argument deduction (as
	       indicated by the state of complain), so that
	       another substitution can be found.  */
	    return error_mark_node;
	  else if (name)
	    pedwarn (loc, OPT_Wpedantic,
		     "ISO C++ forbids zero-size array %qD", name);
	  else
	    pedwarn (loc, OPT_Wpedantic, "ISO C++ forbids zero-size array");
	}
    }
  else if (TREE_CONSTANT (size)
	   /* We don't allow VLAs at non-function scopes, or during
	      tentative template substitution.  */
	   || !at_function_scope_p ()
	   || !(complain & tf_error))
    {
      if (!(complain & tf_error))
	return error_mark_node;
      /* `(int) &fn' is not a valid array bound.  */
      if (name)
	error_at (loc,
		  "size of array %qD is not an integral constant-expression",
		  name);
      else
	error_at (loc, "size of array is not an integral constant-expression");
      size = integer_one_node;
    }
  else if (pedantic && warn_vla != 0)
    {
      if (name)
	pedwarn (name_loc, OPT_Wvla,
		 "ISO C++ forbids variable length array %qD", name);
      else
	pedwarn (input_location, OPT_Wvla,
		 "ISO C++ forbids variable length array");
    }
  else if (warn_vla > 0)
    {
      if (name)
	warning_at (name_loc, OPT_Wvla,
		    "variable length array %qD is used", name);
      else
	warning (OPT_Wvla, "variable length array is used");
    }

  if (processing_template_decl && !TREE_CONSTANT (size))
    /* A variable sized array.  */
    itype = build_min (MINUS_EXPR, sizetype, size, integer_one_node);
  else
    {
      /* Compute the index of the largest element in the array.  It is
	 one less than the number of elements in the array.  We save
	 and restore PROCESSING_TEMPLATE_DECL so that computations in
	 cp_build_binary_op will be appropriately folded.  */
      {
	processing_template_decl_sentinel s;
	itype = cp_build_binary_op (input_location,
				    MINUS_EXPR,
				    cp_convert (ssizetype, size, complain),
				    cp_convert (ssizetype, integer_one_node,
						complain),
				    complain);
	itype = maybe_constant_value (itype);
      }

      if (!TREE_CONSTANT (itype))
	{
	  /* A variable sized array.  */
	  itype = variable_size (itype);

	  stabilize_vla_size (itype);

	  if (sanitize_flags_p (SANITIZE_VLA)
	      && current_function_decl != NULL_TREE)
	    {
	      /* We have to add 1 -- in the ubsan routine we generate
		 LE_EXPR rather than LT_EXPR.  */
	      tree t = fold_build2 (PLUS_EXPR, TREE_TYPE (itype), itype,
				    build_one_cst (TREE_TYPE (itype)));
	      t = ubsan_instrument_vla (input_location, t);
	      finish_expr_stmt (t);
	    }
	}
      /* Make sure that there was no overflow when creating to a signed
	 index type.  (For example, on a 32-bit machine, an array with
	 size 2^32 - 1 is too big.)  */
      else if (TREE_CODE (itype) == INTEGER_CST
	       && TREE_OVERFLOW (itype))
	{
	  if (!(complain & tf_error))
	    return error_mark_node;
	  error ("overflow in array dimension");
	  TREE_OVERFLOW (itype) = 0;
	}
    }

  /* Create and return the appropriate index type.  */
  itype = build_index_type (itype);

  /* If the index type were dependent, we would have returned early, so
     remember that it isn't.  */
  TYPE_DEPENDENT_P (itype) = 0;
  TYPE_DEPENDENT_P_VALID (itype) = 1;
  return itype;
}

/* Wrapper around compute_array_index_type_loc that uses the current
   input location for diagnostics.  */

tree
compute_array_index_type (tree name, tree size, tsubst_flags_t complain)
{
  return compute_array_index_type_loc (input_location, name, size, complain);
}

/* Returns the scope (if any) in which the entity declared by
   DECLARATOR will be located.  If the entity was declared with an
   unqualified name, NULL_TREE is returned.  */

tree
get_scope_of_declarator (const cp_declarator *declarator)
{
  while (declarator && declarator->kind != cdk_id)
    declarator = declarator->declarator;

  /* If the declarator-id is a SCOPE_REF, the scope in which the
     declaration occurs is the first operand.  */
  if (declarator
      && declarator->u.id.qualifying_scope)
    return declarator->u.id.qualifying_scope;

  /* Otherwise, the declarator is not a qualified name; the entity will
     be declared in the current scope.  */
  return NULL_TREE;
}

/* Returns an ARRAY_TYPE for an array with SIZE elements of the
   indicated TYPE.  If non-NULL, NAME is the NAME of the declaration
   with this type.
*/

static tree
create_array_type_for_decl (tree name, tree type, tree size, location_t loc)
{
  tree itype = NULL_TREE;

  /* If things have already gone awry, bail now.  */
  if (type == error_mark_node || size == error_mark_node)
    return error_mark_node;

  /* 8.3.4/1: If the type of the identifier of D contains the auto
     type-specifier, the program is ill-formed.  */
  if (type_uses_auto (type))
    {
      if (name)
	error_at (loc, "%qD declared as array of %qT", name, type);
      else
	error ("creating array of %qT", type);
      return error_mark_node;
    }

  /* If there are some types which cannot be array elements,
     issue an error-message and return.  */
  switch (TREE_CODE (type))
    {
    case VOID_TYPE:
      if (name)
	error_at (loc, "declaration of %qD as array of void", name);
      else
	error ("creating array of void");
      return error_mark_node;

    case FUNCTION_TYPE:
      if (name)
	error_at (loc, "declaration of %qD as array of functions", name);
      else
	error ("creating array of functions");
      return error_mark_node;

    case REFERENCE_TYPE:
      if (name)
	error_at (loc, "declaration of %qD as array of references", name);
      else
	error ("creating array of references");
      return error_mark_node;

    case METHOD_TYPE:
      if (name)
	error_at (loc, "declaration of %qD as array of function members",
		  name);
      else
	error ("creating array of function members");
      return error_mark_node;

    default:
      break;
    }

  if (!verify_type_context (name ? loc : input_location,
			    TCTX_ARRAY_ELEMENT, type))
    return error_mark_node;

  /* [dcl.array]

     The constant expressions that specify the bounds of the arrays
     can be omitted only for the first member of the sequence.  */
  if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type))
    {
      if (name)
	error_at (loc, "declaration of %qD as multidimensional array must "
		  "have bounds for all dimensions except the first", name);
      else
	error ("multidimensional array must have bounds for all "
	       "dimensions except the first");

      return error_mark_node;
    }

  /* Figure out the index type for the array.  */
  if (size)
    itype = compute_array_index_type_loc (loc, name, size,
					  tf_warning_or_error);

  /* [dcl.array]
     T is called the array element type; this type shall not be [...] an
     abstract class type.  */
  abstract_virtuals_error (name, type);

  return build_cplus_array_type (type, itype);
}

/* Returns the smallest location that is not UNKNOWN_LOCATION.  */

static location_t
min_location (location_t loca, location_t locb)
{
  if (loca == UNKNOWN_LOCATION
      || (locb != UNKNOWN_LOCATION
	  && linemap_location_before_p (line_table, locb, loca)))
    return locb;
  return loca;
}

/* Returns the smallest location != UNKNOWN_LOCATION among the
   three stored in LOCATIONS[ds_const], LOCATIONS[ds_volatile],
   and LOCATIONS[ds_restrict].  */

static location_t
smallest_type_quals_location (int type_quals, const location_t* locations)
{
  location_t loc = UNKNOWN_LOCATION;

  if (type_quals & TYPE_QUAL_CONST)
    loc = locations[ds_const];

  if (type_quals & TYPE_QUAL_VOLATILE)
    loc = min_location (loc, locations[ds_volatile]);

  if (type_quals & TYPE_QUAL_RESTRICT)
    loc = min_location (loc, locations[ds_restrict]);

  return loc;
}

/* Returns the smallest among the latter and locations[ds_type_spec].  */

static location_t
smallest_type_location (int type_quals, const location_t* locations)
{
  location_t loc = smallest_type_quals_location (type_quals, locations);
  return min_location (loc, locations[ds_type_spec]);
}

/* Overload taking a decl-specifier sequence; extracts the cv-qualifiers
   from DECLSPECS before delegating to the primary overload.  */

static location_t
smallest_type_location (const cp_decl_specifier_seq *declspecs)
{
  int type_quals = get_type_quals (declspecs);
  return smallest_type_location (type_quals, declspecs->locations);
}

/* Check that it's OK to declare a function with the indicated TYPE
   and TYPE_QUALS.  SFK indicates the kind of special function (if any)
   that this function is.  OPTYPE is the type given in a conversion
   operator declaration, or the class type for a constructor/destructor.
   Returns the actual return type of the function; that may be different
   than TYPE if an error occurs, or for certain special functions.
*/

static tree
check_special_function_return_type (special_function_kind sfk,
				    tree type,
				    tree optype,
				    int type_quals,
				    const location_t* locations)
{
  switch (sfk)
    {
    case sfk_constructor:
      if (type)
	error_at (smallest_type_location (type_quals, locations),
		  "return type specification for constructor invalid");
      else if (type_quals != TYPE_UNQUALIFIED)
	error_at (smallest_type_quals_location (type_quals, locations),
		  "qualifiers are not allowed on constructor declaration");

      if (targetm.cxx.cdtor_returns_this ())
	type = build_pointer_type (optype);
      else
	type = void_type_node;
      break;

    case sfk_destructor:
      if (type)
	error_at (smallest_type_location (type_quals, locations),
		  "return type specification for destructor invalid");
      else if (type_quals != TYPE_UNQUALIFIED)
	error_at (smallest_type_quals_location (type_quals, locations),
		  "qualifiers are not allowed on destructor declaration");

      /* We can't use the proper return type here because we run into
	 problems with ambiguous bases and covariant returns.  */
      if (targetm.cxx.cdtor_returns_this ())
	type = build_pointer_type (void_type_node);
      else
	type = void_type_node;
      break;

    case sfk_conversion:
      if (type)
	error_at (smallest_type_location (type_quals, locations),
		  "return type specified for %<operator %T%>", optype);
      else if (type_quals != TYPE_UNQUALIFIED)
	error_at (smallest_type_quals_location (type_quals, locations),
		  "qualifiers are not allowed on declaration of "
		  "%<operator %T%>", optype);
      type = optype;
      break;

    case sfk_deduction_guide:
      if (type)
	error_at (smallest_type_location (type_quals, locations),
		  "return type specified for deduction guide");
      else if (type_quals != TYPE_UNQUALIFIED)
	error_at (smallest_type_quals_location (type_quals, locations),
		  "qualifiers are not allowed on declaration of "
		  "deduction guide");
      if (TREE_CODE (optype) == TEMPLATE_TEMPLATE_PARM)
	{
	  error ("template template parameter %qT in declaration of "
		 "deduction guide", optype);
	  type = error_mark_node;
	}
      else
	type = make_template_placeholder (CLASSTYPE_TI_TEMPLATE (optype));
      for (int i = 0; i < ds_last; ++i)
	if (i != ds_explicit && locations[i])
	  error_at (locations[i],
		    "%<decl-specifier%> in declaration of deduction guide");
      break;

    default:
      gcc_unreachable ();
    }

  return type;
}

/* A variable or data member (whose unqualified name is IDENTIFIER)
   has been declared with the indicated TYPE.  If the TYPE is not
   acceptable, issue an error message and return a type to use for
   error-recovery purposes.  */

tree
check_var_type (tree identifier, tree type, location_t loc)
{
  if (VOID_TYPE_P (type))
    {
      if (!identifier)
	error_at (loc, "unnamed variable or field declared void");
      else if (identifier_p (identifier))
	{
	  gcc_assert (!IDENTIFIER_ANY_OP_P (identifier));
	  error_at (loc, "variable or field %qE declared void",
		    identifier);
	}
      else
	error_at (loc, "variable or field declared void");
      type = error_mark_node;
    }

  return type;
}

/* Handle declaring DECL as an inline variable.
*/

static void
mark_inline_variable (tree decl, location_t loc)
{
  bool inlinep = true;
  if (! toplevel_bindings_p ())
    {
      error_at (loc, "%<inline%> specifier invalid for variable "
		"%qD declared at block scope", decl);
      inlinep = false;
    }
  else if (cxx_dialect < cxx17)
    pedwarn (loc, 0, "inline variables are only available "
	     "with %<-std=c++17%> or %<-std=gnu++17%>");
  if (inlinep)
    {
      retrofit_lang_decl (decl);
      SET_DECL_VAR_DECLARED_INLINE_P (decl);
    }
}


/* Assign a typedef-given name to a class or enumeration type declared
   as anonymous at first.  This was split out of grokdeclarator
   because it is also used in libcc1.  */

void
name_unnamed_type (tree type, tree decl)
{
  gcc_assert (TYPE_UNNAMED_P (type));

  /* Replace the anonymous name with the real name everywhere.  */
  for (tree t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
    if (IDENTIFIER_ANON_P (TYPE_IDENTIFIER (t)))
      /* We do not rename the debug info representing the unnamed
	 tagged type because the standard says in [dcl.typedef] that
	 the naming applies only for linkage purposes.  */
      /*debug_hooks->set_name (t, decl);*/
      TYPE_NAME (t) = decl;

  /* If this is a typedef within a template class, the nested type is a
     (non-primary) template.  The name for the template needs updating
     as well.  */
  if (TYPE_LANG_SPECIFIC (type) && CLASSTYPE_TEMPLATE_INFO (type))
    DECL_NAME (CLASSTYPE_TI_TEMPLATE (type))
      = TYPE_IDENTIFIER (type);

  /* Adjust linkage now that we aren't unnamed anymore.  */
  reset_type_linkage (type);

  /* FIXME remangle member functions; member functions of a type
     with external linkage have external linkage.  */

  /* Check that our job is done, and that it would fail if we
     attempted to do it again.  */
  gcc_assert (!TYPE_UNNAMED_P (type));
}

/* Given declspecs and a declarator (abstract or otherwise), determine
   the name and type of the object declared and construct a DECL node
   for it.

   DECLSPECS points to the representation of declaration-specifier
   sequence that precedes declarator.
DECL_CONTEXT says which syntactic context this declaration is in: NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL. FUNCDEF for a function definition. Like NORMAL but a few different error messages in each case. Return value may be zero meaning this definition is too screwy to try to parse. MEMFUNCDEF for a function definition. Like FUNCDEF but prepares to handle member functions (which have FIELD context). Return value may be zero meaning this definition is too screwy to try to parse. PARM for a parameter declaration (either within a function prototype or before a function body). Make a PARM_DECL, or return void_type_node. TPARM for a template parameter declaration. CATCHPARM for a parameter declaration before a catch clause. TYPENAME if for a typename (in a cast or sizeof). Don't make a DECL node; just return the ..._TYPE node. FIELD for a struct or union field; make a FIELD_DECL. BITFIELD for a field with specified width. INITIALIZED is as for start_decl. ATTRLIST is a pointer to the list of attributes, which may be NULL if there are none; *ATTRLIST may be modified if attributes from inside the declarator should be applied to the declaration. When this function is called, scoping variables (such as CURRENT_CLASS_TYPE) should reflect the scope in which the declaration occurs, not the scope in which the new declaration will be placed. For example, on: void S::f() { ... } when grokdeclarator is called for `S::f', the CURRENT_CLASS_TYPE should not be `S'. Returns a DECL (if a declarator is present), a TYPE (if there is no declarator, in cases like "struct S;"), or the ERROR_MARK_NODE if an error occurs. 
*/ tree grokdeclarator (const cp_declarator *declarator, cp_decl_specifier_seq *declspecs, enum decl_context decl_context, int initialized, tree* attrlist) { tree type = NULL_TREE; int longlong = 0; int explicit_intN = 0; int int_n_alt = 0; int virtualp, explicitp, friendp, inlinep, staticp; int explicit_int = 0; int explicit_char = 0; int defaulted_int = 0; tree typedef_decl = NULL_TREE; const char *name = NULL; tree typedef_type = NULL_TREE; /* True if this declarator is a function definition. */ bool funcdef_flag = false; cp_declarator_kind innermost_code = cdk_error; int bitfield = 0; #if 0 /* See the code below that used this. */ tree decl_attr = NULL_TREE; #endif /* Keep track of what sort of function is being processed so that we can warn about default return values, or explicit return values which do not match prescribed defaults. */ special_function_kind sfk = sfk_none; tree dname = NULL_TREE; tree ctor_return_type = NULL_TREE; enum overload_flags flags = NO_SPECIAL; /* cv-qualifiers that apply to the declarator, for a declaration of a member function. */ cp_cv_quals memfn_quals = TYPE_UNQUALIFIED; /* virt-specifiers that apply to the declarator, for a declaration of a member function. */ cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED; /* ref-qualifier that applies to the declarator, for a declaration of a member function. */ cp_ref_qualifier rqual = REF_QUAL_NONE; /* cv-qualifiers that apply to the type specified by the DECLSPECS. */ int type_quals = get_type_quals (declspecs); tree raises = NULL_TREE; int template_count = 0; tree returned_attrs = NULL_TREE; tree parms = NULL_TREE; const cp_declarator *id_declarator; /* The unqualified name of the declarator; either an IDENTIFIER_NODE, BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */ tree unqualified_id; /* The class type, if any, in which this entity is located, or NULL_TREE if none. 
Note that this value may be different from the current class type; for example if an attempt is made to declare "A::f" inside "B", this value will be "A". */ tree ctype = current_class_type; /* The NAMESPACE_DECL for the namespace in which this entity is located. If an unqualified name is used to declare the entity, this value will be NULL_TREE, even if the entity is located at namespace scope. */ tree in_namespace = NULL_TREE; cp_storage_class storage_class; bool unsigned_p, signed_p, short_p, long_p, thread_p; bool type_was_error_mark_node = false; bool parameter_pack_p = declarator ? declarator->parameter_pack_p : false; bool template_type_arg = false; bool template_parm_flag = false; bool typedef_p = decl_spec_seq_has_spec_p (declspecs, ds_typedef); bool constexpr_p = decl_spec_seq_has_spec_p (declspecs, ds_constexpr); bool constinit_p = decl_spec_seq_has_spec_p (declspecs, ds_constinit); bool consteval_p = decl_spec_seq_has_spec_p (declspecs, ds_consteval); bool late_return_type_p = false; bool array_parameter_p = false; tree reqs = NULL_TREE; signed_p = decl_spec_seq_has_spec_p (declspecs, ds_signed); unsigned_p = decl_spec_seq_has_spec_p (declspecs, ds_unsigned); short_p = decl_spec_seq_has_spec_p (declspecs, ds_short); long_p = decl_spec_seq_has_spec_p (declspecs, ds_long); longlong = decl_spec_seq_has_spec_p (declspecs, ds_long_long); explicit_intN = declspecs->explicit_intN_p; int_n_alt = declspecs->int_n_alt; thread_p = decl_spec_seq_has_spec_p (declspecs, ds_thread); // Was concept_p specified? Note that ds_concept // implies ds_constexpr! 
bool concept_p = decl_spec_seq_has_spec_p (declspecs, ds_concept); if (concept_p) constexpr_p = true; if (decl_context == FUNCDEF) funcdef_flag = true, decl_context = NORMAL; else if (decl_context == MEMFUNCDEF) funcdef_flag = true, decl_context = FIELD; else if (decl_context == BITFIELD) bitfield = 1, decl_context = FIELD; else if (decl_context == TEMPLATE_TYPE_ARG) template_type_arg = true, decl_context = TYPENAME; else if (decl_context == TPARM) template_parm_flag = true, decl_context = PARM; if (initialized > 1) funcdef_flag = true; location_t typespec_loc = smallest_type_location (type_quals, declspecs->locations); if (typespec_loc == UNKNOWN_LOCATION) typespec_loc = input_location; location_t id_loc = declarator ? declarator->id_loc : input_location; if (id_loc == UNKNOWN_LOCATION) id_loc = input_location; /* Look inside a declarator for the name being declared and get it as a string, for an error message. */ for (id_declarator = declarator; id_declarator; id_declarator = id_declarator->declarator) { if (id_declarator->kind != cdk_id) innermost_code = id_declarator->kind; switch (id_declarator->kind) { case cdk_function: if (id_declarator->declarator && id_declarator->declarator->kind == cdk_id) { sfk = id_declarator->declarator->u.id.sfk; if (sfk == sfk_destructor) flags = DTOR_FLAG; } break; case cdk_id: { tree qualifying_scope = id_declarator->u.id.qualifying_scope; tree decl = id_declarator->u.id.unqualified_name; if (!decl) break; if (qualifying_scope) { if (check_for_bare_parameter_packs (qualifying_scope, id_declarator->id_loc)) return error_mark_node; if (at_function_scope_p ()) { /* [dcl.meaning] A declarator-id shall not be qualified except for ... None of the cases are permitted in block scope. 
*/ if (qualifying_scope == global_namespace) error ("invalid use of qualified-name %<::%D%>", decl); else if (TYPE_P (qualifying_scope)) error ("invalid use of qualified-name %<%T::%D%>", qualifying_scope, decl); else error ("invalid use of qualified-name %<%D::%D%>", qualifying_scope, decl); return error_mark_node; } else if (TYPE_P (qualifying_scope)) { ctype = qualifying_scope; if (!MAYBE_CLASS_TYPE_P (ctype)) { error_at (id_declarator->id_loc, "%q#T is not a class or namespace", ctype); ctype = NULL_TREE; } else if (innermost_code != cdk_function && current_class_type && !uniquely_derived_from_p (ctype, current_class_type)) { error_at (id_declarator->id_loc, "invalid use of qualified-name %<%T::%D%>", qualifying_scope, decl); return error_mark_node; } } else if (TREE_CODE (qualifying_scope) == NAMESPACE_DECL) in_namespace = qualifying_scope; } switch (TREE_CODE (decl)) { case BIT_NOT_EXPR: { if (innermost_code != cdk_function) { error_at (EXPR_LOCATION (decl), "declaration of %qE as non-function", decl); return error_mark_node; } else if (!qualifying_scope && !(current_class_type && at_class_scope_p ())) { error_at (EXPR_LOCATION (decl), "declaration of %qE as non-member", decl); return error_mark_node; } tree type = TREE_OPERAND (decl, 0); if (TYPE_P (type)) type = constructor_name (type); name = identifier_to_locale (IDENTIFIER_POINTER (type)); dname = decl; } break; case TEMPLATE_ID_EXPR: { tree fns = TREE_OPERAND (decl, 0); dname = fns; if (!identifier_p (dname)) dname = OVL_NAME (dname); } /* Fall through. 
*/ case IDENTIFIER_NODE: if (identifier_p (decl)) dname = decl; if (IDENTIFIER_KEYWORD_P (dname)) { error ("declarator-id missing; using reserved word %qD", dname); name = identifier_to_locale (IDENTIFIER_POINTER (dname)); } else if (!IDENTIFIER_CONV_OP_P (dname)) name = identifier_to_locale (IDENTIFIER_POINTER (dname)); else { gcc_assert (flags == NO_SPECIAL); flags = TYPENAME_FLAG; sfk = sfk_conversion; tree glob = get_global_binding (dname); if (glob && TREE_CODE (glob) == TYPE_DECL) name = identifier_to_locale (IDENTIFIER_POINTER (dname)); else name = "<invalid operator>"; } break; default: gcc_unreachable (); } break; } case cdk_array: case cdk_pointer: case cdk_reference: case cdk_ptrmem: break; case cdk_decomp: name = "structured binding"; break; case cdk_error: return error_mark_node; default: gcc_unreachable (); } if (id_declarator->kind == cdk_id) break; } /* [dcl.fct.edf] The declarator in a function-definition shall have the form D1 ( parameter-declaration-clause) ... */ if (funcdef_flag && innermost_code != cdk_function) { error_at (id_loc, "function definition does not declare parameters"); return error_mark_node; } if (flags == TYPENAME_FLAG && innermost_code != cdk_function && ! (ctype && !declspecs->any_specifiers_p)) { error_at (id_loc, "declaration of %qD as non-function", dname); return error_mark_node; } if (dname && identifier_p (dname)) { if (UDLIT_OPER_P (dname) && innermost_code != cdk_function) { error_at (id_loc, "declaration of %qD as non-function", dname); return error_mark_node; } if (IDENTIFIER_ANY_OP_P (dname)) { if (typedef_p) { error_at (id_loc, "declaration of %qD as %<typedef%>", dname); return error_mark_node; } else if (decl_context == PARM || decl_context == CATCHPARM) { error_at (id_loc, "declaration of %qD as parameter", dname); return error_mark_node; } } } /* Anything declared one level down from the top level must be one of the parameters of a function (because the body is at least two levels down). 
*/ /* This heuristic cannot be applied to C++ nodes! Fixed, however, by not allowing C++ class definitions to specify their parameters with xdecls (must be spec.d in the parmlist). Since we now wait to push a class scope until we are sure that we are in a legitimate method context, we must set oldcname explicitly (since current_class_name is not yet alive). We also want to avoid calling this a PARM if it is in a namespace. */ if (decl_context == NORMAL && !toplevel_bindings_p ()) { cp_binding_level *b = current_binding_level; current_binding_level = b->level_chain; if (current_binding_level != 0 && toplevel_bindings_p ()) decl_context = PARM; current_binding_level = b; } if (name == NULL) name = decl_context == PARM ? "parameter" : "type name"; if (consteval_p && constexpr_p) { error_at (declspecs->locations[ds_consteval], "both %qs and %qs specified", "constexpr", "consteval"); return error_mark_node; } if (concept_p && typedef_p) { error_at (declspecs->locations[ds_concept], "%qs cannot appear in a typedef declaration", "concept"); return error_mark_node; } if (constexpr_p && typedef_p) { error_at (declspecs->locations[ds_constexpr], "%qs cannot appear in a typedef declaration", "constexpr"); return error_mark_node; } if (consteval_p && typedef_p) { error_at (declspecs->locations[ds_consteval], "%qs cannot appear in a typedef declaration", "consteval"); return error_mark_node; } if (constinit_p && typedef_p) { error_at (declspecs->locations[ds_constinit], "%qs cannot appear in a typedef declaration", "constinit"); return error_mark_node; } /* [dcl.spec]/2 "At most one of the constexpr, consteval, and constinit keywords shall appear in a decl-specifier-seq." 
*/ if (constinit_p && constexpr_p) { gcc_rich_location richloc (declspecs->locations[ds_constinit]); richloc.add_range (declspecs->locations[ds_constexpr]); error_at (&richloc, "can use at most one of the %<constinit%> and %<constexpr%> " "specifiers"); return error_mark_node; } /* If there were multiple types specified in the decl-specifier-seq, issue an error message. */ if (declspecs->multiple_types_p) { error_at (typespec_loc, "two or more data types in declaration of %qs", name); return error_mark_node; } if (declspecs->conflicting_specifiers_p) { error_at (min_location (declspecs->locations[ds_typedef], declspecs->locations[ds_storage_class]), "conflicting specifiers in declaration of %qs", name); return error_mark_node; } /* Extract the basic type from the decl-specifier-seq. */ type = declspecs->type; if (type == error_mark_node) { type = NULL_TREE; type_was_error_mark_node = true; } cp_warn_deprecated_use (type); if (type && TREE_CODE (type) == TYPE_DECL) { cp_warn_deprecated_use_scopes (CP_DECL_CONTEXT (type)); typedef_decl = type; type = TREE_TYPE (typedef_decl); if (DECL_ARTIFICIAL (typedef_decl)) cp_warn_deprecated_use (type); } /* No type at all: default to `int', and set DEFAULTED_INT because it was not a user-defined typedef. */ if (type == NULL_TREE) { if (signed_p || unsigned_p || long_p || short_p) { /* These imply 'int'. */ type = integer_type_node; defaulted_int = 1; } /* If we just have "complex", it is equivalent to "complex double". */ else if (!longlong && !explicit_intN && decl_spec_seq_has_spec_p (declspecs, ds_complex)) { type = double_type_node; pedwarn (declspecs->locations[ds_complex], OPT_Wpedantic, "ISO C++ does not support plain %<complex%> meaning " "%<double complex%>"); } } /* Gather flags. */ explicit_int = declspecs->explicit_int_p; explicit_char = declspecs->explicit_char_p; #if 0 /* See the code below that used this. 
*/ if (typedef_decl) decl_attr = DECL_ATTRIBUTES (typedef_decl); #endif typedef_type = type; if (sfk == sfk_conversion || sfk == sfk_deduction_guide) ctor_return_type = TREE_TYPE (dname); else ctor_return_type = ctype; if (sfk != sfk_none) { type = check_special_function_return_type (sfk, type, ctor_return_type, type_quals, declspecs->locations); type_quals = TYPE_UNQUALIFIED; } else if (type == NULL_TREE) { int is_main; explicit_int = -1; /* We handle `main' specially here, because 'main () { }' is so common. With no options, it is allowed. With -Wreturn-type, it is a warning. It is only an error with -pedantic-errors. */ is_main = (funcdef_flag && dname && identifier_p (dname) && MAIN_NAME_P (dname) && ctype == NULL_TREE && in_namespace == NULL_TREE && current_namespace == global_namespace); if (type_was_error_mark_node) /* We've already issued an error, don't complain more. */; else if (in_system_header_at (id_loc) || flag_ms_extensions) /* Allow it, sigh. */; else if (! is_main) permerror (id_loc, "ISO C++ forbids declaration of %qs with no type", name); else if (pedantic) pedwarn (id_loc, OPT_Wpedantic, "ISO C++ forbids declaration of %qs with no type", name); else warning_at (id_loc, OPT_Wreturn_type, "ISO C++ forbids declaration of %qs with no type", name); if (type_was_error_mark_node && template_parm_flag) /* FIXME we should be able to propagate the error_mark_node as is for other contexts too. */ type = error_mark_node; else type = integer_type_node; } ctype = NULL_TREE; if (explicit_intN) { if (! int_n_enabled_p[declspecs->int_n_idx]) { error_at (declspecs->locations[ds_type_spec], "%<__int%d%> is not supported by this target", int_n_data[declspecs->int_n_idx].bitsize); explicit_intN = false; } /* Don't pedwarn if the alternate "__intN__" form has been used instead of "__intN". 
*/ else if (!int_n_alt && pedantic) pedwarn (declspecs->locations[ds_type_spec], OPT_Wpedantic, "ISO C++ does not support %<__int%d%> for %qs", int_n_data[declspecs->int_n_idx].bitsize, name); } /* Now process the modifiers that were specified and check for invalid combinations. */ /* Long double is a special combination. */ if (long_p && !longlong && TYPE_MAIN_VARIANT (type) == double_type_node) { long_p = false; type = cp_build_qualified_type (long_double_type_node, cp_type_quals (type)); } /* Check all other uses of type modifiers. */ if (unsigned_p || signed_p || long_p || short_p) { location_t loc; const char *key; if (unsigned_p) { key = "unsigned"; loc = declspecs->locations[ds_unsigned]; } else if (signed_p) { key = "signed"; loc = declspecs->locations[ds_signed]; } else if (longlong) { key = "long long"; loc = declspecs->locations[ds_long_long]; } else if (long_p) { key = "long"; loc = declspecs->locations[ds_long]; } else /* if (short_p) */ { key = "short"; loc = declspecs->locations[ds_short]; } int ok = 0; if (signed_p && unsigned_p) { gcc_rich_location richloc (declspecs->locations[ds_signed]); richloc.add_range (declspecs->locations[ds_unsigned]); error_at (&richloc, "%<signed%> and %<unsigned%> specified together"); } else if (long_p && short_p) { gcc_rich_location richloc (declspecs->locations[ds_long]); richloc.add_range (declspecs->locations[ds_short]); error_at (&richloc, "%<long%> and %<short%> specified together"); } else if (TREE_CODE (type) != INTEGER_TYPE || type == char8_type_node || type == char16_type_node || type == char32_type_node || ((long_p || short_p) && (explicit_char || explicit_intN))) error_at (loc, "%qs specified with %qT", key, type); else if (!explicit_int && !defaulted_int && !explicit_char && !explicit_intN) { if (typedef_decl) { pedwarn (loc, OPT_Wpedantic, "%qs specified with %qT", key, type); ok = !flag_pedantic_errors; } else if (declspecs->decltype_p) error_at (loc, "%qs specified with %<decltype%>", key); else 
error_at (loc, "%qs specified with %<typeof%>", key); } else ok = 1; /* Discard the type modifiers if they are invalid. */ if (! ok) { unsigned_p = false; signed_p = false; long_p = false; short_p = false; longlong = 0; } } /* Decide whether an integer type is signed or not. Optionally treat bitfields as signed by default. */ if (unsigned_p /* [class.bit] It is implementation-defined whether a plain (neither explicitly signed or unsigned) char, short, int, or long bit-field is signed or unsigned. Naturally, we extend this to long long as well. Note that this does not include wchar_t. */ || (bitfield && !flag_signed_bitfields && !signed_p /* A typedef for plain `int' without `signed' can be controlled just like plain `int', but a typedef for `signed int' cannot be so controlled. */ && !(typedef_decl && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl)) && TREE_CODE (type) == INTEGER_TYPE && !same_type_p (TYPE_MAIN_VARIANT (type), wchar_type_node))) { if (explicit_intN) type = int_n_trees[declspecs->int_n_idx].unsigned_type; else if (longlong) type = long_long_unsigned_type_node; else if (long_p) type = long_unsigned_type_node; else if (short_p) type = short_unsigned_type_node; else if (type == char_type_node) type = unsigned_char_type_node; else if (typedef_decl) type = unsigned_type_for (type); else type = unsigned_type_node; } else if (signed_p && type == char_type_node) type = signed_char_type_node; else if (explicit_intN) type = int_n_trees[declspecs->int_n_idx].signed_type; else if (longlong) type = long_long_integer_type_node; else if (long_p) type = long_integer_type_node; else if (short_p) type = short_integer_type_node; if (decl_spec_seq_has_spec_p (declspecs, ds_complex)) { if (TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE) error_at (declspecs->locations[ds_complex], "complex invalid for %qs", name); /* If a modifier is specified, the resulting complex is the complex form of TYPE. E.g, "complex short" is "complex short int". 
*/ else if (type == integer_type_node) type = complex_integer_type_node; else if (type == float_type_node) type = complex_float_type_node; else if (type == double_type_node) type = complex_double_type_node; else if (type == long_double_type_node) type = complex_long_double_type_node; else type = build_complex_type (type); } /* If we're using the injected-class-name to form a compound type or a declaration, replace it with the underlying class so we don't get redundant typedefs in the debug output. But if we are returning the type unchanged, leave it alone so that it's available to maybe_get_template_decl_from_type_decl. */ if (CLASS_TYPE_P (type) && DECL_SELF_REFERENCE_P (TYPE_NAME (type)) && type == TREE_TYPE (TYPE_NAME (type)) && (declarator || type_quals)) type = DECL_ORIGINAL_TYPE (TYPE_NAME (type)); type_quals |= cp_type_quals (type); type = cp_build_qualified_type_real (type, type_quals, ((((typedef_decl && !DECL_ARTIFICIAL (typedef_decl)) || declspecs->decltype_p) ? tf_ignore_bad_quals : 0) | tf_warning_or_error)); /* We might have ignored or rejected some of the qualifiers. 
*/ type_quals = cp_type_quals (type); if (cxx_dialect >= cxx17 && type && is_auto (type) && innermost_code != cdk_function && id_declarator && declarator != id_declarator) if (tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (type)) { error_at (typespec_loc, "template placeholder type %qT must be followed " "by a simple declarator-id", type); inform (DECL_SOURCE_LOCATION (tmpl), "%qD declared here", tmpl); type = error_mark_node; } staticp = 0; inlinep = decl_spec_seq_has_spec_p (declspecs, ds_inline); virtualp = decl_spec_seq_has_spec_p (declspecs, ds_virtual); explicitp = decl_spec_seq_has_spec_p (declspecs, ds_explicit); storage_class = declspecs->storage_class; if (storage_class == sc_static) staticp = 1 + (decl_context == FIELD); if (virtualp) { if (staticp == 2) { gcc_rich_location richloc (declspecs->locations[ds_virtual]); richloc.add_range (declspecs->locations[ds_storage_class]); error_at (&richloc, "member %qD cannot be declared both %<virtual%> " "and %<static%>", dname); storage_class = sc_none; staticp = 0; } if (constexpr_p && cxx_dialect < cxx2a) { gcc_rich_location richloc (declspecs->locations[ds_virtual]); richloc.add_range (declspecs->locations[ds_constexpr]); pedwarn (&richloc, OPT_Wpedantic, "member %qD can be declared both " "%<virtual%> and %<constexpr%> only in %<-std=c++2a%> or " "%<-std=gnu++2a%>", dname); } } friendp = decl_spec_seq_has_spec_p (declspecs, ds_friend); /* Issue errors about use of storage classes for parameters. 
*/ if (decl_context == PARM) { if (typedef_p) { error_at (declspecs->locations[ds_typedef], "typedef declaration invalid in parameter declaration"); return error_mark_node; } else if (template_parm_flag && storage_class != sc_none) { error_at (min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]), "storage class specified for template parameter %qs", name); return error_mark_node; } else if (storage_class == sc_static || storage_class == sc_extern || thread_p) { error_at (min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]), "storage class specified for parameter %qs", name); return error_mark_node; } /* Function parameters cannot be concept. */ if (concept_p) { error_at (declspecs->locations[ds_concept], "a parameter cannot be declared %qs", "concept"); concept_p = 0; constexpr_p = 0; } /* Function parameters cannot be constexpr. If we saw one, moan and pretend it wasn't there. */ else if (constexpr_p) { error_at (declspecs->locations[ds_constexpr], "a parameter cannot be declared %qs", "constexpr"); constexpr_p = 0; } if (constinit_p) { error_at (declspecs->locations[ds_constinit], "a parameter cannot be declared %qs", "constinit"); constinit_p = 0; } if (consteval_p) { error_at (declspecs->locations[ds_consteval], "a parameter cannot be declared %qs", "consteval"); consteval_p = 0; } } /* Give error if `virtual' is used outside of class declaration. */ if (virtualp && (current_class_name == NULL_TREE || decl_context != FIELD)) { error_at (declspecs->locations[ds_virtual], "%<virtual%> outside class declaration"); virtualp = 0; } if (innermost_code == cdk_decomp) { location_t loc = (declarator->kind == cdk_reference ? 
declarator->declarator->id_loc : declarator->id_loc); if (inlinep) error_at (declspecs->locations[ds_inline], "structured binding declaration cannot be %qs", "inline"); if (typedef_p) error_at (declspecs->locations[ds_typedef], "structured binding declaration cannot be %qs", "typedef"); if (constexpr_p && !concept_p) error_at (declspecs->locations[ds_constexpr], "structured " "binding declaration cannot be %qs", "constexpr"); if (consteval_p) error_at (declspecs->locations[ds_consteval], "structured " "binding declaration cannot be %qs", "consteval"); if (thread_p && cxx_dialect < cxx2a) pedwarn (declspecs->locations[ds_thread], 0, "structured binding declaration can be %qs only in " "%<-std=c++2a%> or %<-std=gnu++2a%>", declspecs->gnu_thread_keyword_p ? "__thread" : "thread_local"); if (concept_p) error_at (declspecs->locations[ds_concept], "structured binding declaration cannot be %qs", "concept"); /* [dcl.struct.bind] "A cv that includes volatile is deprecated." */ if (type_quals & TYPE_QUAL_VOLATILE) warning_at (declspecs->locations[ds_volatile], OPT_Wvolatile, "%<volatile%>-qualified structured binding is deprecated"); switch (storage_class) { case sc_none: break; case sc_register: error_at (loc, "structured binding declaration cannot be %qs", "register"); break; case sc_static: if (cxx_dialect < cxx2a) pedwarn (loc, 0, "structured binding declaration can be %qs only in " "%<-std=c++2a%> or %<-std=gnu++2a%>", "static"); break; case sc_extern: error_at (loc, "structured binding declaration cannot be %qs", "extern"); break; case sc_mutable: error_at (loc, "structured binding declaration cannot be %qs", "mutable"); break; case sc_auto: error_at (loc, "structured binding declaration cannot be " "C++98 %<auto%>"); break; default: gcc_unreachable (); } if (TREE_CODE (type) != TEMPLATE_TYPE_PARM || TYPE_IDENTIFIER (type) != auto_identifier) { if (type != error_mark_node) { error_at (loc, "structured binding declaration cannot have " "type %qT", type); inform (loc, 
"type must be cv-qualified %<auto%> or reference to " "cv-qualified %<auto%>"); } type = build_qualified_type (make_auto (), type_quals); declspecs->type = type; } inlinep = 0; typedef_p = 0; constexpr_p = 0; consteval_p = 0; concept_p = 0; if (storage_class != sc_static) { storage_class = sc_none; declspecs->storage_class = sc_none; } } /* Static anonymous unions are dealt with here. */ if (staticp && decl_context == TYPENAME && declspecs->type && ANON_AGGR_TYPE_P (declspecs->type)) decl_context = FIELD; /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (thread_p && ((storage_class && storage_class != sc_extern && storage_class != sc_static) || typedef_p)) { location_t loc = min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]); error_at (loc, "multiple storage classes in declaration of %qs", name); thread_p = false; } if (decl_context != NORMAL && ((storage_class != sc_none && storage_class != sc_mutable) || thread_p)) { if ((decl_context == PARM || decl_context == CATCHPARM) && (storage_class == sc_register || storage_class == sc_auto)) ; else if (typedef_p) ; else if (decl_context == FIELD /* C++ allows static class elements. */ && storage_class == sc_static) /* C++ also allows inlines and signed and unsigned elements, but in those cases we don't come in here. */ ; else { location_t loc = min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]); if (decl_context == FIELD) error_at (loc, "storage class specified for %qs", name); else if (decl_context == PARM || decl_context == CATCHPARM) error_at (loc, "storage class specified for parameter %qs", name); else error_at (loc, "storage class specified for typename"); if (storage_class == sc_register || storage_class == sc_auto || storage_class == sc_extern || thread_p) storage_class = sc_none; } } else if (storage_class == sc_extern && funcdef_flag && ! 
toplevel_bindings_p ()) error ("nested function %qs declared %<extern%>", name); else if (toplevel_bindings_p ()) { if (storage_class == sc_auto) error_at (declspecs->locations[ds_storage_class], "top-level declaration of %qs specifies %<auto%>", name); } else if (thread_p && storage_class != sc_extern && storage_class != sc_static) { if (declspecs->gnu_thread_keyword_p) pedwarn (declspecs->locations[ds_thread], 0, "function-scope %qs implicitly auto and " "declared %<__thread%>", name); /* When thread_local is applied to a variable of block scope the storage-class-specifier static is implied if it does not appear explicitly. */ storage_class = declspecs->storage_class = sc_static; staticp = 1; } if (storage_class && friendp) { error_at (min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]), "storage class specifiers invalid in friend function " "declarations"); storage_class = sc_none; staticp = 0; } if (!id_declarator) unqualified_id = NULL_TREE; else { unqualified_id = id_declarator->u.id.unqualified_name; switch (TREE_CODE (unqualified_id)) { case BIT_NOT_EXPR: unqualified_id = TREE_OPERAND (unqualified_id, 0); if (TYPE_P (unqualified_id)) unqualified_id = constructor_name (unqualified_id); break; case IDENTIFIER_NODE: case TEMPLATE_ID_EXPR: break; default: gcc_unreachable (); } } if (declspecs->std_attributes) { location_t attr_loc = declspecs->locations[ds_std_attribute]; if (warning_at (attr_loc, OPT_Wattributes, "attribute ignored")) inform (attr_loc, "an attribute that appertains to a type-specifier " "is ignored"); } /* Determine the type of the entity declared by recurring on the declarator. 
*/ for (; declarator; declarator = declarator->declarator) { const cp_declarator *inner_declarator; tree attrs; if (type == error_mark_node) return error_mark_node; attrs = declarator->attributes; if (attrs) { int attr_flags; attr_flags = 0; if (declarator == NULL || declarator->kind == cdk_id) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; if (declarator->kind == cdk_function) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; if (declarator->kind == cdk_array) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; tree late_attrs = NULL_TREE; if (decl_context != PARM && decl_context != TYPENAME) /* Assume that any attributes that get applied late to templates will DTRT when applied to the declaration as a whole. */ late_attrs = splice_template_attributes (&attrs, type); returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); returned_attrs = chainon (late_attrs, returned_attrs); } inner_declarator = declarator->declarator; /* We don't want to warn in parameter context because we don't yet know if the parse will succeed, and this might turn out to be a constructor call. */ if (decl_context != PARM && decl_context != TYPENAME && !typedef_p && declarator->parenthesized != UNKNOWN_LOCATION /* If the type is class-like and the inner name used a global namespace qualifier, we need the parens. Unfortunately all we can tell is whether a qualified name was used or not. */ && !(inner_declarator && inner_declarator->kind == cdk_id && inner_declarator->u.id.qualifying_scope && (MAYBE_CLASS_TYPE_P (type) || TREE_CODE (type) == ENUMERAL_TYPE))) warning_at (declarator->parenthesized, OPT_Wparentheses, "unnecessary parentheses in declaration of %qs", name); if (declarator->kind == cdk_id || declarator->kind == cdk_decomp) break; switch (declarator->kind) { case cdk_array: type = create_array_type_for_decl (dname, type, declarator->u.array.bounds, declarator->id_loc); if (!valid_array_size_p (dname ? 
declarator->id_loc : input_location, type, dname)) type = error_mark_node; if (declarator->std_attributes) /* [dcl.array]/1: The optional attribute-specifier-seq appertains to the array. */ returned_attrs = chainon (returned_attrs, declarator->std_attributes); break; case cdk_function: { tree arg_types; int funcdecl_p; /* Declaring a function type. */ { iloc_sentinel ils (declspecs->locations[ds_type_spec]); abstract_virtuals_error (ACU_RETURN, type); } /* Pick up type qualifiers which should be applied to `this'. */ memfn_quals = declarator->u.function.qualifiers; /* Pick up virt-specifiers. */ virt_specifiers = declarator->u.function.virt_specifiers; /* And ref-qualifier, too */ rqual = declarator->u.function.ref_qualifier; /* And tx-qualifier. */ tree tx_qual = declarator->u.function.tx_qualifier; /* Pick up the exception specifications. */ raises = declarator->u.function.exception_specification; /* If the exception-specification is ill-formed, let's pretend there wasn't one. */ if (raises == error_mark_node) raises = NULL_TREE; if (reqs) error_at (location_of (reqs), "requires-clause on return type"); reqs = declarator->u.function.requires_clause; /* Say it's a definition only for the CALL_EXPR closest to the identifier. */ funcdecl_p = inner_declarator && inner_declarator->kind == cdk_id; /* Handle a late-specified return type. */ tree late_return_type = declarator->u.function.late_return_type; if (funcdecl_p /* This is the case e.g. for using T = auto () -> int. */ || inner_declarator == NULL) { if (tree auto_node = type_uses_auto (type)) { if (!late_return_type) { if (current_class_type && LAMBDA_TYPE_P (current_class_type)) /* OK for C++11 lambdas. 
*/; else if (cxx_dialect < cxx14) { error_at (typespec_loc, "%qs function uses " "%<auto%> type specifier without " "trailing return type", name); inform (typespec_loc, "deduced return type only available " "with %<-std=c++14%> or %<-std=gnu++14%>"); } else if (virtualp) { error_at (typespec_loc, "virtual function " "cannot have deduced return type"); virtualp = false; } } else if (!is_auto (type) && sfk != sfk_conversion) { error_at (typespec_loc, "%qs function with trailing " "return type has %qT as its type rather " "than plain %<auto%>", name, type); return error_mark_node; } else if (is_auto (type) && AUTO_IS_DECLTYPE (type)) { if (funcdecl_p) error_at (typespec_loc, "%qs function with trailing return type " "has %<decltype(auto)%> as its type " "rather than plain %<auto%>", name); else error_at (typespec_loc, "invalid use of %<decltype(auto)%>"); return error_mark_node; } tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node); if (!tmpl) if (tree late_auto = type_uses_auto (late_return_type)) tmpl = CLASS_PLACEHOLDER_TEMPLATE (late_auto); if (tmpl && funcdecl_p) { if (!dguide_name_p (unqualified_id)) { error_at (declarator->id_loc, "deduced class " "type %qD in function return type", DECL_NAME (tmpl)); inform (DECL_SOURCE_LOCATION (tmpl), "%qD declared here", tmpl); return error_mark_node; } else if (!late_return_type) { error_at (declarator->id_loc, "deduction guide " "for %qT must have trailing return " "type", TREE_TYPE (tmpl)); inform (DECL_SOURCE_LOCATION (tmpl), "%qD declared here", tmpl); return error_mark_node; } else if (CLASS_TYPE_P (late_return_type) && CLASSTYPE_TEMPLATE_INFO (late_return_type) && (CLASSTYPE_TI_TEMPLATE (late_return_type) == tmpl)) /* OK */; else error ("trailing return type %qT of deduction guide " "is not a specialization of %qT", late_return_type, TREE_TYPE (tmpl)); } } else if (late_return_type && sfk != sfk_conversion) { if (late_return_type == error_mark_node) return error_mark_node; if (cxx_dialect < cxx11) /* Not using 
maybe_warn_cpp0x because this should always be an error. */ error_at (typespec_loc, "trailing return type only available " "with %<-std=c++11%> or %<-std=gnu++11%>"); else error_at (typespec_loc, "%qs function with trailing " "return type not declared with %<auto%> " "type specifier", name); return error_mark_node; } } type = splice_late_return_type (type, late_return_type); if (type == error_mark_node) return error_mark_node; if (late_return_type) { late_return_type_p = true; type_quals = cp_type_quals (type); } if (type_quals != TYPE_UNQUALIFIED) { if (SCALAR_TYPE_P (type) || VOID_TYPE_P (type)) warning_at (typespec_loc, OPT_Wignored_qualifiers, "type " "qualifiers ignored on function return type"); /* [dcl.fct] "A volatile-qualified return type is deprecated." */ if (type_quals & TYPE_QUAL_VOLATILE) warning_at (typespec_loc, OPT_Wvolatile, "%<volatile%>-qualified return type is " "deprecated"); /* We now know that the TYPE_QUALS don't apply to the decl, but to its return type. */ type_quals = TYPE_UNQUALIFIED; } /* Error about some types functions can't return. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (typespec_loc, "%qs declared as function returning " "a function", name); return error_mark_node; } if (TREE_CODE (type) == ARRAY_TYPE) { error_at (typespec_loc, "%qs declared as function returning " "an array", name); return error_mark_node; } if (constinit_p) { error_at (declspecs->locations[ds_constinit], "%<constinit%> on function return type is not " "allowed"); return error_mark_node; } /* Only plain decltype(auto) is allowed. */ if (tree a = type_uses_auto (type)) { if (AUTO_IS_DECLTYPE (a) && a != type) { error_at (typespec_loc, "%qT as type rather than " "plain %<decltype(auto)%>", type); return error_mark_node; } } if (ctype == NULL_TREE && decl_context == FIELD && funcdecl_p && friendp == 0) ctype = current_class_type; if (ctype && (sfk == sfk_constructor || sfk == sfk_destructor)) { /* We are within a class's scope. 
If our declarator name is the same as the class name, and we are defining a function, then it is a constructor/destructor, and therefore returns a void type. */ /* ISO C++ 12.4/2. A destructor may not be declared const or volatile. A destructor may not be static. A destructor may not be declared with ref-qualifier. ISO C++ 12.1. A constructor may not be declared const or volatile. A constructor may not be virtual. A constructor may not be static. A constructor may not be declared with ref-qualifier. */ if (staticp == 2) error_at (declspecs->locations[ds_storage_class], (flags == DTOR_FLAG) ? G_("destructor cannot be static member " "function") : G_("constructor cannot be static member " "function")); if (memfn_quals) { error ((flags == DTOR_FLAG) ? G_("destructors may not be cv-qualified") : G_("constructors may not be cv-qualified")); memfn_quals = TYPE_UNQUALIFIED; } if (rqual) { maybe_warn_cpp0x (CPP0X_REF_QUALIFIER); error ((flags == DTOR_FLAG) ? G_("destructors may not be ref-qualified") : G_("constructors may not be ref-qualified")); rqual = REF_QUAL_NONE; } if (decl_context == FIELD && !member_function_or_else (ctype, current_class_type, flags)) return error_mark_node; if (flags != DTOR_FLAG) { /* It's a constructor. */ if (explicitp == 1) explicitp = 2; if (virtualp) { permerror (declspecs->locations[ds_virtual], "constructors cannot be declared %<virtual%>"); virtualp = 0; } if (decl_context == FIELD && sfk != sfk_constructor) return error_mark_node; } if (decl_context == FIELD) staticp = 0; } else if (friendp) { if (virtualp) { /* Cannot be both friend and virtual. 
*/ gcc_rich_location richloc (declspecs->locations[ds_virtual]); richloc.add_range (declspecs->locations[ds_friend]); error_at (&richloc, "virtual functions cannot be friends"); friendp = 0; } if (decl_context == NORMAL) error_at (declarator->id_loc, "friend declaration not in class definition"); if (current_function_decl && funcdef_flag) { error_at (declarator->id_loc, "cannot define friend function %qs in a local " "class definition", name); friendp = 0; } /* [class.friend]/6: A function can be defined in a friend declaration if the function name is unqualified. */ if (funcdef_flag && in_namespace) { if (in_namespace == global_namespace) error_at (declarator->id_loc, "friend function definition %qs cannot have " "a name qualified with %<::%>", name); else error_at (declarator->id_loc, "friend function definition %qs cannot have " "a name qualified with %<%D::%>", name, in_namespace); } } else if (ctype && sfk == sfk_conversion) { if (explicitp == 1) { maybe_warn_cpp0x (CPP0X_EXPLICIT_CONVERSION); explicitp = 2; } if (late_return_type_p) error ("a conversion function cannot have a trailing return type"); } else if (sfk == sfk_deduction_guide) { if (explicitp == 1) explicitp = 2; } tree pushed_scope = NULL_TREE; if (funcdecl_p && decl_context != FIELD && inner_declarator->u.id.qualifying_scope && CLASS_TYPE_P (inner_declarator->u.id.qualifying_scope)) pushed_scope = push_scope (inner_declarator->u.id.qualifying_scope); arg_types = grokparms (declarator->u.function.parameters, &parms); if (pushed_scope) pop_scope (pushed_scope); if (inner_declarator && inner_declarator->kind == cdk_id && inner_declarator->u.id.sfk == sfk_destructor && arg_types != void_list_node) { error_at (declarator->id_loc, "destructors may not have parameters"); arg_types = void_list_node; parms = NULL_TREE; } type = build_function_type (type, arg_types); tree attrs = declarator->std_attributes; if (tx_qual) { tree att = build_tree_list (tx_qual, NULL_TREE); /* transaction_safe applies to the 
type, but transaction_safe_dynamic applies to the function. */ if (is_attribute_p ("transaction_safe", tx_qual)) attrs = chainon (attrs, att); else returned_attrs = chainon (returned_attrs, att); } if (attrs) /* [dcl.fct]/2: The optional attribute-specifier-seq appertains to the function type. */ cplus_decl_attributes (&type, attrs, 0); if (raises) type = build_exception_variant (type, raises); } break; case cdk_pointer: case cdk_reference: case cdk_ptrmem: /* Filter out pointers-to-references and references-to-references. We can get these if a TYPE_DECL is used. */ if (TYPE_REF_P (type)) { if (declarator->kind != cdk_reference) { error ("cannot declare pointer to %q#T", type); type = TREE_TYPE (type); } /* In C++0x, we allow reference to reference declarations that occur indirectly through typedefs [7.1.3/8 dcl.typedef] and template type arguments [14.3.1/4 temp.arg.type]. The check for direct reference to reference declarations, which are still forbidden, occurs below. Reasoning behind the change can be found in DR106, DR540, and the rvalue reference proposals. */ else if (cxx_dialect == cxx98) { error ("cannot declare reference to %q#T", type); type = TREE_TYPE (type); } } else if (VOID_TYPE_P (type)) { if (declarator->kind == cdk_reference) error ("cannot declare reference to %q#T", type); else if (declarator->kind == cdk_ptrmem) error ("cannot declare pointer to %q#T member", type); } /* We now know that the TYPE_QUALS don't apply to the decl, but to the target of the pointer. */ type_quals = TYPE_UNQUALIFIED; /* This code used to handle METHOD_TYPE, but I don't think it's possible to get it here anymore. 
*/ gcc_assert (TREE_CODE (type) != METHOD_TYPE); if (declarator->kind == cdk_ptrmem && TREE_CODE (type) == FUNCTION_TYPE) { memfn_quals |= type_memfn_quals (type); type = build_memfn_type (type, declarator->u.pointer.class_type, memfn_quals, rqual); if (type == error_mark_node) return error_mark_node; rqual = REF_QUAL_NONE; memfn_quals = TYPE_UNQUALIFIED; } if (TREE_CODE (type) == FUNCTION_TYPE && (type_memfn_quals (type) != TYPE_UNQUALIFIED || type_memfn_rqual (type) != REF_QUAL_NONE)) error (declarator->kind == cdk_reference ? G_("cannot declare reference to qualified function type %qT") : G_("cannot declare pointer to qualified function type %qT"), type); /* When the pointed-to type involves components of variable size, care must be taken to ensure that the size evaluation code is emitted early enough to dominate all the possible later uses and late enough for the variables on which it depends to have been assigned. This is expected to happen automatically when the pointed-to type has a name/declaration of it's own, but special attention is required if the type is anonymous. We handle the NORMAL and FIELD contexts here by inserting a dummy statement that just evaluates the size at a safe point and ensures it is not deferred until e.g. within a deeper conditional context (c++/43555). We expect nothing to be needed here for PARM or TYPENAME. Evaluating the size at this point for TYPENAME would actually be incorrect, as we might be in the middle of an expression with side effects on the pointed-to type size "arguments" prior to the pointer declaration point and the size evaluation could end up prior to the side effects. 
*/ if (!TYPE_NAME (type) && (decl_context == NORMAL || decl_context == FIELD) && at_function_scope_p () && variably_modified_type_p (type, NULL_TREE)) { TYPE_NAME (type) = build_decl (UNKNOWN_LOCATION, TYPE_DECL, NULL_TREE, type); add_decl_expr (TYPE_NAME (type)); } if (declarator->kind == cdk_reference) { /* In C++0x, the type we are creating a reference to might be a typedef which is itself a reference type. In that case, we follow the reference collapsing rules in [7.1.3/8 dcl.typedef] to create the final reference type: "If a typedef TD names a type that is a reference to a type T, an attempt to create the type 'lvalue reference to cv TD' creates the type 'lvalue reference to T,' while an attempt to create the type "rvalue reference to cv TD' creates the type TD." */ if (VOID_TYPE_P (type)) /* We already gave an error. */; else if (TYPE_REF_P (type)) { if (declarator->u.reference.rvalue_ref) /* Leave type alone. */; else type = cp_build_reference_type (TREE_TYPE (type), false); } else type = cp_build_reference_type (type, declarator->u.reference.rvalue_ref); /* In C++0x, we need this check for direct reference to reference declarations, which are forbidden by [8.3.2/5 dcl.ref]. Reference to reference declarations are only allowed indirectly through typedefs and template type arguments. Example: void foo(int & &); // invalid ref-to-ref decl typedef int & int_ref; void foo(int_ref &); // valid ref-to-ref decl */ if (inner_declarator && inner_declarator->kind == cdk_reference) error ("cannot declare reference to %q#T, which is not " "a typedef or a template type argument", type); } else if (TREE_CODE (type) == METHOD_TYPE) type = build_ptrmemfunc_type (build_pointer_type (type)); else if (declarator->kind == cdk_ptrmem) { gcc_assert (TREE_CODE (declarator->u.pointer.class_type) != NAMESPACE_DECL); if (declarator->u.pointer.class_type == error_mark_node) /* We will already have complained. 
*/ type = error_mark_node; else type = build_ptrmem_type (declarator->u.pointer.class_type, type); } else type = build_pointer_type (type); /* Process a list of type modifier keywords (such as const or volatile) that were given inside the `*' or `&'. */ if (declarator->u.pointer.qualifiers) { type = cp_build_qualified_type (type, declarator->u.pointer.qualifiers); type_quals = cp_type_quals (type); } /* Apply C++11 attributes to the pointer, and not to the type pointed to. This is unlike what is done for GNU attributes above. It is to comply with [dcl.ptr]/1: [the optional attribute-specifier-seq (7.6.1) appertains to the pointer and not to the object pointed to]. */ if (declarator->std_attributes) decl_attributes (&type, declarator->std_attributes, 0); ctype = NULL_TREE; break; case cdk_error: break; default: gcc_unreachable (); } } id_loc = declarator ? declarator->id_loc : input_location; /* A `constexpr' specifier used in an object declaration declares the object as `const'. */ if (constexpr_p && innermost_code != cdk_function) { /* DR1688 says that a `constexpr' specifier in combination with `volatile' is valid. */ if (!TYPE_REF_P (type)) { type_quals |= TYPE_QUAL_CONST; type = cp_build_qualified_type (type, type_quals); } } if (unqualified_id && TREE_CODE (unqualified_id) == TEMPLATE_ID_EXPR && !FUNC_OR_METHOD_TYPE_P (type) && !variable_template_p (TREE_OPERAND (unqualified_id, 0))) { error ("template-id %qD used as a declarator", unqualified_id); unqualified_id = dname; } /* If TYPE is a FUNCTION_TYPE, but the function name was explicitly qualified with a class-name, turn it into a METHOD_TYPE, unless we know that the function is static. We take advantage of this opportunity to do other processing that pertains to entities explicitly declared to be class members. Note that if DECLARATOR is non-NULL, we know it is a cdk_id declarator; otherwise, we would not have exited the loop above. 
*/ if (declarator && declarator->kind == cdk_id && declarator->u.id.qualifying_scope && MAYBE_CLASS_TYPE_P (declarator->u.id.qualifying_scope)) { ctype = declarator->u.id.qualifying_scope; ctype = TYPE_MAIN_VARIANT (ctype); template_count = num_template_headers_for_class (ctype); if (ctype == current_class_type) { if (friendp) { permerror (declspecs->locations[ds_friend], "member functions are implicitly " "friends of their class"); friendp = 0; } else permerror (id_loc, "extra qualification %<%T::%> on member %qs", ctype, name); } else if (/* If the qualifying type is already complete, then we can skip the following checks. */ !COMPLETE_TYPE_P (ctype) && (/* If the function is being defined, then qualifying type must certainly be complete. */ funcdef_flag /* A friend declaration of "T::f" is OK, even if "T" is a template parameter. But, if this function is not a friend, the qualifying type must be a class. */ || (!friendp && !CLASS_TYPE_P (ctype)) /* For a declaration, the type need not be complete, if either it is dependent (since there is no meaningful definition of complete in that case) or the qualifying class is currently being defined. */ || !(dependent_type_p (ctype) || currently_open_class (ctype))) /* Check that the qualifying type is complete. */ && !complete_type_or_else (ctype, NULL_TREE)) return error_mark_node; else if (TREE_CODE (type) == FUNCTION_TYPE) { if (current_class_type && (!friendp || funcdef_flag || initialized)) { error_at (id_loc, funcdef_flag || initialized ? G_("cannot define member function %<%T::%s%> " "within %qT") : G_("cannot declare member function %<%T::%s%> " "within %qT"), ctype, name, current_class_type); return error_mark_node; } } else if (typedef_p && current_class_type) { error_at (id_loc, "cannot declare member %<%T::%s%> within %qT", ctype, name, current_class_type); return error_mark_node; } } if (ctype == NULL_TREE && decl_context == FIELD && friendp == 0) ctype = current_class_type; /* Now TYPE has the actual type. 
*/ if (returned_attrs) { if (attrlist) *attrlist = chainon (returned_attrs, *attrlist); else attrlist = &returned_attrs; } if (declarator && declarator->kind == cdk_id && declarator->std_attributes && attrlist != NULL) { /* [dcl.meaning]/1: The optional attribute-specifier-seq following a declarator-id appertains to the entity that is declared. */ if (declarator->std_attributes != error_mark_node) *attrlist = chainon (*attrlist, declarator->std_attributes); else /* We should have already diagnosed the issue (c++/78344). */ gcc_assert (seen_error ()); } /* Handle parameter packs. */ if (parameter_pack_p) { if (decl_context == PARM) /* Turn the type into a pack expansion.*/ type = make_pack_expansion (type); else error ("non-parameter %qs cannot be a parameter pack", name); } if ((decl_context == FIELD || decl_context == PARM) && !processing_template_decl && variably_modified_type_p (type, NULL_TREE)) { if (decl_context == FIELD) error_at (id_loc, "data member may not have variably modified type %qT", type); else error_at (id_loc, "parameter may not have variably modified type %qT", type); type = error_mark_node; } if (explicitp == 1 || (explicitp && friendp)) { /* [dcl.fct.spec] (C++11) The explicit specifier shall be used only in the declaration of a constructor or conversion function within a class definition. 
*/ if (!current_class_type) error_at (declspecs->locations[ds_explicit], "%<explicit%> outside class declaration"); else if (friendp) error_at (declspecs->locations[ds_explicit], "%<explicit%> in friend declaration"); else error_at (declspecs->locations[ds_explicit], "only declarations of constructors and conversion operators " "can be %<explicit%>"); explicitp = 0; } if (storage_class == sc_mutable) { location_t sloc = declspecs->locations[ds_storage_class]; if (decl_context != FIELD || friendp) { error_at (sloc, "non-member %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (decl_context == TYPENAME || typedef_p) { error_at (sloc, "non-object member %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (FUNC_OR_METHOD_TYPE_P (type)) { error_at (sloc, "function %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (staticp) { error_at (sloc, "%<static%> %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (type_quals & TYPE_QUAL_CONST) { error_at (sloc, "%<const%> %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (TYPE_REF_P (type)) { permerror (sloc, "reference %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } } /* If this is declaring a typedef name, return a TYPE_DECL. */ if (typedef_p && decl_context != TYPENAME) { bool alias_p = decl_spec_seq_has_spec_p (declspecs, ds_alias); tree decl; if (funcdef_flag) { if (decl_context == NORMAL) error_at (id_loc, "typedef may not be a function definition"); else error_at (id_loc, "typedef may not be a member function definition"); return error_mark_node; } /* This declaration: typedef void f(int) const; declares a function type which is not a member of any particular class, but which is cv-qualified; for example "f S::*" declares a pointer to a const-qualified member function of S. We record the cv-qualification in the function type. 
*/ if ((rqual || memfn_quals) && TREE_CODE (type) == FUNCTION_TYPE) { type = apply_memfn_quals (type, memfn_quals, rqual); /* We have now dealt with these qualifiers. */ memfn_quals = TYPE_UNQUALIFIED; rqual = REF_QUAL_NONE; } if (type_uses_auto (type)) { if (alias_p) error_at (declspecs->locations[ds_type_spec], "%<auto%> not allowed in alias declaration"); else error_at (declspecs->locations[ds_type_spec], "typedef declared %<auto%>"); type = error_mark_node; } if (reqs) error_at (location_of (reqs), "requires-clause on typedef"); if (id_declarator && declarator->u.id.qualifying_scope) { error_at (id_loc, "typedef name may not be a nested-name-specifier"); type = error_mark_node; } if (decl_context == FIELD) decl = build_lang_decl_loc (id_loc, TYPE_DECL, unqualified_id, type); else decl = build_decl (id_loc, TYPE_DECL, unqualified_id, type); if (decl_context != FIELD) { if (!current_function_decl) DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace); else if (DECL_MAYBE_IN_CHARGE_CDTOR_P (current_function_decl)) /* The TYPE_DECL is "abstract" because there will be clones of this constructor/destructor, and there will be copies of this TYPE_DECL generated in those clones. The decloning optimization (for space) may revert this subsequently if it determines that the clones should share a common implementation. */ DECL_ABSTRACT_P (decl) = true; } else if (current_class_type && constructor_name_p (unqualified_id, current_class_type)) permerror (id_loc, "ISO C++ forbids nested type %qD with same name " "as enclosing class", unqualified_id); /* If the user declares "typedef struct {...} foo" then the struct will have an anonymous name. Fill that name in now. Nothing can refer to it, so nothing needs know about the name change. 
*/ if (type != error_mark_node && unqualified_id && TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && TYPE_UNNAMED_P (type) && declspecs->type_definition_p && attributes_naming_typedef_ok (*attrlist) && cp_type_quals (type) == TYPE_UNQUALIFIED) name_unnamed_type (type, decl); if (signed_p || (typedef_decl && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl))) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; bad_specifiers (decl, BSP_TYPE, virtualp, memfn_quals != TYPE_UNQUALIFIED, inlinep, friendp, raises != NULL_TREE, declspecs->locations); if (alias_p) /* Acknowledge that this was written: `using analias = atype;'. */ TYPE_DECL_ALIAS_P (decl) = 1; return decl; } /* Detect the case of an array type of unspecified size which came, as such, direct from a typedef name. We must copy the type, so that the array's domain can be individually set by the object's initializer. */ if (type && typedef_type && TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type) && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (typedef_type)) type = build_cplus_array_type (TREE_TYPE (type), NULL_TREE); /* Detect where we're using a typedef of function type to declare a function. PARMS will not be set, so we must create it now. */ if (type == typedef_type && TREE_CODE (type) == FUNCTION_TYPE) { tree decls = NULL_TREE; tree args; for (args = TYPE_ARG_TYPES (type); args && args != void_list_node; args = TREE_CHAIN (args)) { tree decl = cp_build_parm_decl (NULL_TREE, NULL_TREE, TREE_VALUE (args)); DECL_CHAIN (decl) = decls; decls = decl; } parms = nreverse (decls); if (decl_context != TYPENAME) { /* The qualifiers on the function type become the qualifiers on the non-static member function. */ memfn_quals |= type_memfn_quals (type); rqual = type_memfn_rqual (type); type_quals = TYPE_UNQUALIFIED; raises = TYPE_RAISES_EXCEPTIONS (type); } } /* If this is a type name (such as, in a cast or sizeof), compute the type and return it now. 
*/ if (decl_context == TYPENAME) { /* Note that here we don't care about type_quals. */ /* Special case: "friend class foo" looks like a TYPENAME context. */ if (friendp) { if (inlinep) { error ("%<inline%> specified for friend class declaration"); inlinep = 0; } if (!current_aggr) { /* Don't allow friend declaration without a class-key. */ if (TREE_CODE (type) == TEMPLATE_TYPE_PARM) permerror (input_location, "template parameters cannot be friends"); else if (TREE_CODE (type) == TYPENAME_TYPE) permerror (input_location, "friend declaration requires class-key, " "i.e. %<friend class %T::%D%>", TYPE_CONTEXT (type), TYPENAME_TYPE_FULLNAME (type)); else permerror (input_location, "friend declaration requires class-key, " "i.e. %<friend %#T%>", type); } /* Only try to do this stuff if we didn't already give up. */ if (type != integer_type_node) { /* A friendly class? */ if (current_class_type) make_friend_class (current_class_type, TYPE_MAIN_VARIANT (type), /*complain=*/true); else error ("trying to make class %qT a friend of global scope", type); type = void_type_node; } } else if (memfn_quals || rqual) { if (ctype == NULL_TREE && TREE_CODE (type) == METHOD_TYPE) ctype = TYPE_METHOD_BASETYPE (type); if (ctype) type = build_memfn_type (type, ctype, memfn_quals, rqual); /* Core issue #547: need to allow this in template type args. Allow it in general in C++11 for alias-declarations. */ else if ((template_type_arg || cxx_dialect >= cxx11) && TREE_CODE (type) == FUNCTION_TYPE) type = apply_memfn_quals (type, memfn_quals, rqual); else error ("invalid qualifiers on non-member function type"); } if (reqs) error_at (location_of (reqs), "requires-clause on type-id"); return type; } else if (unqualified_id == NULL_TREE && decl_context != PARM && decl_context != CATCHPARM && TREE_CODE (type) != UNION_TYPE && ! 
bitfield && innermost_code != cdk_decomp) { error ("abstract declarator %qT used as declaration", type); return error_mark_node; } if (!FUNC_OR_METHOD_TYPE_P (type)) { /* Only functions may be declared using an operator-function-id. */ if (dname && IDENTIFIER_ANY_OP_P (dname)) { error_at (id_loc, "declaration of %qD as non-function", dname); return error_mark_node; } if (reqs) error_at (location_of (reqs), "requires-clause on declaration of non-function type %qT", type); } /* We don't check parameter types here because we can emit a better error message later. */ if (decl_context != PARM) { type = check_var_type (unqualified_id, type, id_loc); if (type == error_mark_node) return error_mark_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ if (decl_context == PARM || decl_context == CATCHPARM) { if (ctype || in_namespace) error ("cannot use %<::%> in parameter declaration"); tree auto_node = type_uses_auto (type); if (auto_node && !(cxx_dialect >= cxx17 && template_parm_flag)) { if (cxx_dialect >= cxx14) { if (decl_context == PARM && AUTO_IS_DECLTYPE (auto_node)) error_at (typespec_loc, "cannot declare a parameter with %<decltype(auto)%>"); else error_at (typespec_loc, "%<auto%> parameter not permitted in this context"); } else error_at (typespec_loc, "parameter declared %<auto%>"); type = error_mark_node; } /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. One declared as a member is really a pointer to member. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to. 
*/ type = build_pointer_type (TREE_TYPE (type)); type_quals = TYPE_UNQUALIFIED; array_parameter_p = true; } else if (TREE_CODE (type) == FUNCTION_TYPE) type = build_pointer_type (type); } if (ctype && TREE_CODE (type) == FUNCTION_TYPE && staticp < 2 && !(unqualified_id && identifier_p (unqualified_id) && IDENTIFIER_NEWDEL_OP_P (unqualified_id))) { cp_cv_quals real_quals = memfn_quals; if (cxx_dialect < cxx14 && constexpr_p && sfk != sfk_constructor && sfk != sfk_destructor) real_quals |= TYPE_QUAL_CONST; type = build_memfn_type (type, ctype, real_quals, rqual); } { tree decl = NULL_TREE; if (decl_context == PARM) { decl = cp_build_parm_decl (NULL_TREE, unqualified_id, type); DECL_ARRAY_PARAMETER_P (decl) = array_parameter_p; bad_specifiers (decl, BSP_PARM, virtualp, memfn_quals != TYPE_UNQUALIFIED, inlinep, friendp, raises != NULL_TREE, declspecs->locations); } else if (decl_context == FIELD) { if (!staticp && !friendp && TREE_CODE (type) != METHOD_TYPE) if (tree auto_node = type_uses_auto (type)) { location_t tloc = declspecs->locations[ds_type_spec]; if (CLASS_PLACEHOLDER_TEMPLATE (auto_node)) error_at (tloc, "invalid use of template-name %qE without an " "argument list", CLASS_PLACEHOLDER_TEMPLATE (auto_node)); else error_at (tloc, "non-static data member declared with " "placeholder %qT", auto_node); type = error_mark_node; } /* The C99 flexible array extension. */ if (!staticp && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { if (ctype && (TREE_CODE (ctype) == UNION_TYPE || TREE_CODE (ctype) == QUAL_UNION_TYPE)) { error_at (id_loc, "flexible array member in union"); type = error_mark_node; } else { /* Array is a flexible member. */ if (name) pedwarn (id_loc, OPT_Wpedantic, "ISO C++ forbids flexible array member %qs", name); else pedwarn (input_location, OPT_Wpedantic, "ISO C++ forbids flexible array members"); /* Flexible array member has a null domain. 
*/ type = build_cplus_array_type (TREE_TYPE (type), NULL_TREE); } } if (type == error_mark_node) { /* Happens when declaring arrays of sizes which are error_mark_node, for example. */ decl = NULL_TREE; } else if (in_namespace && !friendp) { /* Something like struct S { int N::j; }; */ error_at (id_loc, "invalid use of %<::%>"); return error_mark_node; } else if (FUNC_OR_METHOD_TYPE_P (type) && unqualified_id) { int publicp = 0; tree function_context; if (friendp == 0) { /* This should never happen in pure C++ (the check could be an assert). It could happen in Objective-C++ if someone writes invalid code that uses a function declaration for an instance variable or property (instance variables and properties are parsed as FIELD_DECLs, but they are part of an Objective-C class, not a C++ class). That code is invalid and is caught by this check. */ if (!ctype) { error ("declaration of function %qD in invalid context", unqualified_id); return error_mark_node; } /* ``A union may [ ... ] not [ have ] virtual functions.'' ARM 9.5 */ if (virtualp && TREE_CODE (ctype) == UNION_TYPE) { error_at (declspecs->locations[ds_virtual], "function %qD declared %<virtual%> inside a union", unqualified_id); return error_mark_node; } if (virtualp && identifier_p (unqualified_id) && IDENTIFIER_NEWDEL_OP_P (unqualified_id)) { error_at (declspecs->locations[ds_virtual], "%qD cannot be declared %<virtual%>, since it " "is always static", unqualified_id); virtualp = 0; } } /* Check that the name used for a destructor makes sense. 
*/ if (sfk == sfk_destructor) { tree uqname = id_declarator->u.id.unqualified_name; if (!ctype) { gcc_assert (friendp); error_at (id_loc, "expected qualified name in friend " "declaration for destructor %qD", uqname); return error_mark_node; } if (!check_dtor_name (ctype, TREE_OPERAND (uqname, 0))) { error_at (id_loc, "declaration of %qD as member of %qT", uqname, ctype); return error_mark_node; } if (concept_p) { error_at (declspecs->locations[ds_concept], "a destructor cannot be %qs", "concept"); return error_mark_node; } if (constexpr_p && cxx_dialect < cxx2a) { error_at (declspecs->locations[ds_constexpr], "%<constexpr%> destructors only available" " with %<-std=c++2a%> or %<-std=gnu++2a%>"); return error_mark_node; } if (consteval_p) { error_at (declspecs->locations[ds_consteval], "a destructor cannot be %qs", "consteval"); return error_mark_node; } } else if (sfk == sfk_constructor && friendp && !ctype) { error ("expected qualified name in friend declaration " "for constructor %qD", id_declarator->u.id.unqualified_name); return error_mark_node; } if (sfk == sfk_constructor) if (concept_p) { error_at (declspecs->locations[ds_concept], "a constructor cannot be %<concept%>"); return error_mark_node; } if (concept_p) { error_at (declspecs->locations[ds_concept], "a concept cannot be a member function"); concept_p = false; } else if (consteval_p && identifier_p (unqualified_id) && IDENTIFIER_NEWDEL_OP_P (unqualified_id)) { error_at (declspecs->locations[ds_consteval], "%qD cannot be %qs", unqualified_id, "consteval"); consteval_p = false; } if (TREE_CODE (unqualified_id) == TEMPLATE_ID_EXPR) { tree tmpl = TREE_OPERAND (unqualified_id, 0); if (variable_template_p (tmpl)) { error_at (id_loc, "specialization of variable template " "%qD declared as function", tmpl); inform (DECL_SOURCE_LOCATION (tmpl), "variable template declared here"); return error_mark_node; } } /* Tell grokfndecl if it needs to set TREE_PUBLIC on the node. 
*/ function_context = (ctype != NULL_TREE ? decl_function_context (TYPE_MAIN_DECL (ctype)) : NULL_TREE); publicp = ((! friendp || ! staticp) && function_context == NULL_TREE); decl = grokfndecl (ctype, type, TREE_CODE (unqualified_id) != TEMPLATE_ID_EXPR ? unqualified_id : dname, parms, unqualified_id, declspecs, reqs, virtualp, flags, memfn_quals, rqual, raises, friendp ? -1 : 0, friendp, publicp, inlinep | (2 * constexpr_p) | (4 * concept_p) | (8 * consteval_p), initialized == SD_DELETED, sfk, funcdef_flag, late_return_type_p, initialized, template_count, in_namespace, attrlist, id_loc); decl = set_virt_specifiers (decl, virt_specifiers); if (decl == NULL_TREE) return error_mark_node; #if 0 /* This clobbers the attrs stored in `decl' from `attrlist'. */ /* The decl and setting of decl_attr is also turned off. */ decl = build_decl_attribute_variant (decl, decl_attr); #endif /* [class.conv.ctor] A constructor declared without the function-specifier explicit that can be called with a single parameter specifies a conversion from the type of its first parameter to the type of its class. Such a constructor is called a converting constructor. */ if (explicitp == 2) DECL_NONCONVERTING_P (decl) = 1; if (declspecs->explicit_specifier) store_explicit_specifier (decl, declspecs->explicit_specifier); } else if (!staticp && ((current_class_type && same_type_p (type, current_class_type)) || (!dependent_type_p (type) && !COMPLETE_TYPE_P (complete_type (type)) && (!complete_or_array_type_p (type) || initialized == 0)))) { if (TREE_CODE (type) != ARRAY_TYPE || !COMPLETE_TYPE_P (TREE_TYPE (type))) { if (unqualified_id) { error_at (id_loc, "field %qD has incomplete type %qT", unqualified_id, type); cxx_incomplete_type_inform (strip_array_types (type)); } else error ("name %qT has incomplete type", type); type = error_mark_node; decl = NULL_TREE; } } else if (!verify_type_context (input_location, staticp ? 
TCTX_STATIC_STORAGE : TCTX_FIELD, type)) { type = error_mark_node; decl = NULL_TREE; } else { if (friendp) { if (unqualified_id) error_at (id_loc, "%qE is neither function nor member function; " "cannot be declared friend", unqualified_id); else error ("unnamed field is neither function nor member " "function; cannot be declared friend"); return error_mark_node; } decl = NULL_TREE; } if (friendp) { /* Friends are treated specially. */ if (ctype == current_class_type) ; /* We already issued a permerror. */ else if (decl && DECL_NAME (decl)) { if (initialized) /* Kludge: We need funcdef_flag to be true in do_friend for in-class defaulted functions, but that breaks grokfndecl. So set it here. */ funcdef_flag = true; if (template_class_depth (current_class_type) == 0) { decl = check_explicit_specialization (unqualified_id, decl, template_count, 2 * funcdef_flag + 4); if (decl == error_mark_node) return error_mark_node; } decl = do_friend (ctype, unqualified_id, decl, *attrlist, flags, funcdef_flag); return decl; } else return error_mark_node; } /* Structure field. It may not be a function, except for C++. */ if (decl == NULL_TREE) { if (staticp) { /* C++ allows static class members. All other work for this is done by grokfield. 
*/ decl = build_lang_decl_loc (id_loc, VAR_DECL, unqualified_id, type); set_linkage_for_static_data_member (decl); if (concept_p) error_at (declspecs->locations[ds_concept], "static data member %qE declared %qs", unqualified_id, "concept"); else if (constexpr_p && !initialized) { error_at (DECL_SOURCE_LOCATION (decl), "%<constexpr%> static data member %qD must " "have an initializer", decl); constexpr_p = false; } if (consteval_p) error_at (declspecs->locations[ds_consteval], "static data member %qE declared %qs", unqualified_id, "consteval"); if (inlinep) mark_inline_variable (decl, declspecs->locations[ds_inline]); if (!DECL_VAR_DECLARED_INLINE_P (decl) && !(cxx_dialect >= cxx17 && constexpr_p)) /* Even if there is an in-class initialization, DECL is considered undefined until an out-of-class definition is provided, unless this is an inline variable. */ DECL_EXTERNAL (decl) = 1; if (thread_p) { CP_DECL_THREAD_LOCAL_P (decl) = true; if (!processing_template_decl) set_decl_tls_model (decl, decl_default_tls_model (decl)); if (declspecs->gnu_thread_keyword_p) SET_DECL_GNU_TLS_P (decl); } } else { if (concept_p) { error_at (declspecs->locations[ds_concept], "non-static data member %qE declared %qs", unqualified_id, "concept"); concept_p = false; constexpr_p = false; } else if (constexpr_p) { error_at (declspecs->locations[ds_constexpr], "non-static data member %qE declared %qs", unqualified_id, "constexpr"); constexpr_p = false; } if (constinit_p) { error_at (declspecs->locations[ds_constinit], "non-static data member %qE declared %qs", unqualified_id, "constinit"); constinit_p = false; } if (consteval_p) { error_at (declspecs->locations[ds_consteval], "non-static data member %qE declared %qs", unqualified_id, "consteval"); consteval_p = false; } decl = build_decl (id_loc, FIELD_DECL, unqualified_id, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (bitfield && !unqualified_id) { TREE_NO_WARNING (decl) = 1; DECL_PADDING_P (decl) = 1; } if (storage_class == 
sc_mutable) { DECL_MUTABLE_P (decl) = 1; storage_class = sc_none; } if (initialized) { /* An attempt is being made to initialize a non-static member. This is new in C++11. */ maybe_warn_cpp0x (CPP0X_NSDMI); /* If this has been parsed with static storage class, but errors forced staticp to be cleared, ensure NSDMI is not present. */ if (declspecs->storage_class == sc_static) DECL_INITIAL (decl) = error_mark_node; } } bad_specifiers (decl, BSP_FIELD, virtualp, memfn_quals != TYPE_UNQUALIFIED, staticp ? false : inlinep, friendp, raises != NULL_TREE, declspecs->locations); } } else if (FUNC_OR_METHOD_TYPE_P (type)) { tree original_name; int publicp = 0; if (!unqualified_id) return error_mark_node; if (TREE_CODE (unqualified_id) == TEMPLATE_ID_EXPR) original_name = dname; else original_name = unqualified_id; // FIXME:gcc_assert (original_name == dname); if (storage_class == sc_auto) error_at (declspecs->locations[ds_storage_class], "storage class %<auto%> invalid for function %qs", name); else if (storage_class == sc_register) error_at (declspecs->locations[ds_storage_class], "storage class %<register%> invalid for function %qs", name); else if (thread_p) { if (declspecs->gnu_thread_keyword_p) error_at (declspecs->locations[ds_thread], "storage class %<__thread%> invalid for function %qs", name); else error_at (declspecs->locations[ds_thread], "storage class %<thread_local%> invalid for " "function %qs", name); } if (virt_specifiers) error ("virt-specifiers in %qs not allowed outside a class " "definition", name); /* Function declaration not at top level. Storage classes other than `extern' are not allowed and `extern' makes no difference. */ if (! 
toplevel_bindings_p () && (storage_class == sc_static || decl_spec_seq_has_spec_p (declspecs, ds_inline)) && pedantic) { if (storage_class == sc_static) pedwarn (declspecs->locations[ds_storage_class], OPT_Wpedantic, "%<static%> specifier invalid for function %qs " "declared out of global scope", name); else pedwarn (declspecs->locations[ds_inline], OPT_Wpedantic, "%<inline%> specifier invalid for function %qs " "declared out of global scope", name); } if (ctype == NULL_TREE) { if (virtualp) { error ("virtual non-class function %qs", name); virtualp = 0; } else if (sfk == sfk_constructor || sfk == sfk_destructor) { error (funcdef_flag ? G_("%qs defined in a non-class scope") : G_("%qs declared in a non-class scope"), name); sfk = sfk_none; } } if (consteval_p && identifier_p (unqualified_id) && IDENTIFIER_NEWDEL_OP_P (unqualified_id)) { error_at (declspecs->locations[ds_consteval], "%qD cannot be %qs", unqualified_id, "consteval"); consteval_p = false; } /* Record whether the function is public. */ publicp = (ctype != NULL_TREE || storage_class != sc_static); decl = grokfndecl (ctype, type, original_name, parms, unqualified_id, declspecs, reqs, virtualp, flags, memfn_quals, rqual, raises, 1, friendp, publicp, inlinep | (2 * constexpr_p) | (4 * concept_p) | (8 * consteval_p), initialized == SD_DELETED, sfk, funcdef_flag, late_return_type_p, initialized, template_count, in_namespace, attrlist, id_loc); if (decl == NULL_TREE) return error_mark_node; if (explicitp == 2) DECL_NONCONVERTING_P (decl) = 1; if (staticp == 1) { int invalid_static = 0; /* Don't allow a static member function in a class, and forbid declaring main to be static. */ if (TREE_CODE (type) == METHOD_TYPE) { permerror (input_location, "cannot declare member function %qD to have " "static linkage", decl); invalid_static = 1; } else if (current_function_decl) { /* 7.1.1: There can be no static function declarations within a block. 
*/ error_at (declspecs->locations[ds_storage_class], "cannot declare static function inside another function"); invalid_static = 1; } if (invalid_static) { staticp = 0; storage_class = sc_none; } } } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ decl = grokvardecl (type, dname, unqualified_id, declspecs, initialized, type_quals, inlinep, concept_p, template_count, ctype ? ctype : in_namespace, id_loc); if (decl == NULL_TREE) return error_mark_node; bad_specifiers (decl, BSP_VAR, virtualp, memfn_quals != TYPE_UNQUALIFIED, inlinep, friendp, raises != NULL_TREE, declspecs->locations); if (ctype) { DECL_CONTEXT (decl) = ctype; if (staticp == 1) { permerror (declspecs->locations[ds_storage_class], "%<static%> may not be used when defining " "(as opposed to declaring) a static data member"); staticp = 0; storage_class = sc_none; } if (storage_class == sc_register && TREE_STATIC (decl)) { error ("static member %qD declared %<register%>", decl); storage_class = sc_none; } if (storage_class == sc_extern && pedantic) { pedwarn (input_location, OPT_Wpedantic, "cannot explicitly declare member %q#D to have " "extern linkage", decl); storage_class = sc_none; } } else if (constexpr_p && DECL_EXTERNAL (decl)) { error_at (DECL_SOURCE_LOCATION (decl), "declaration of %<constexpr%> variable %qD " "is not a definition", decl); constexpr_p = false; } if (consteval_p) { error_at (DECL_SOURCE_LOCATION (decl), "a variable cannot be declared %<consteval%>"); consteval_p = false; } if (inlinep) mark_inline_variable (decl, declspecs->locations[ds_inline]); if (innermost_code == cdk_decomp) { gcc_assert (declarator && declarator->kind == cdk_decomp); DECL_SOURCE_LOCATION (decl) = id_loc; DECL_ARTIFICIAL (decl) = 1; fit_decomposition_lang_decl (decl, NULL_TREE); } } if (VAR_P (decl) && !initialized) if (tree auto_node = type_uses_auto (type)) if (!CLASS_PLACEHOLDER_TEMPLATE (auto_node)) { location_t loc = declspecs->locations[ds_type_spec]; error_at 
(loc, "declaration of %q#D has no initializer", decl); TREE_TYPE (decl) = error_mark_node; } if (storage_class == sc_extern && initialized && !funcdef_flag) { if (toplevel_bindings_p ()) { /* It's common practice (and completely valid) to have a const be initialized and declared extern. */ if (!(type_quals & TYPE_QUAL_CONST)) warning_at (DECL_SOURCE_LOCATION (decl), 0, "%qs initialized and declared %<extern%>", name); } else { error_at (DECL_SOURCE_LOCATION (decl), "%qs has both %<extern%> and initializer", name); return error_mark_node; } } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (storage_class == sc_register) { DECL_REGISTER (decl) = 1; /* Warn about register storage specifiers on PARM_DECLs. */ if (TREE_CODE (decl) == PARM_DECL) { if (cxx_dialect >= cxx17) pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wregister, "ISO C++17 does not allow %<register%> storage " "class specifier"); else warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wregister, "%<register%> storage class specifier used"); } } else if (storage_class == sc_extern) DECL_THIS_EXTERN (decl) = 1; else if (storage_class == sc_static) DECL_THIS_STATIC (decl) = 1; /* Set constexpr flag on vars (functions got it in grokfndecl). */ if (constexpr_p && VAR_P (decl)) DECL_DECLARED_CONSTEXPR_P (decl) = true; /* Record constancy and volatility on the DECL itself . There's no need to do this when processing a template; we'll do this for the instantiated declaration based on the type of DECL. */ if (!processing_template_decl) cp_apply_type_quals_to_decl (type_quals, decl); return decl; } } /* Subroutine of start_function. Ensure that each of the parameter types (as listed in PARMS) is complete, as is required for a function definition. 
*/

static void
require_complete_types_for_parms (tree parms)
{
  for (; parms; parms = DECL_CHAIN (parms))
    {
      /* Dependent parameter types cannot be checked for completeness
	 until instantiation; skip them here.  */
      if (dependent_type_p (TREE_TYPE (parms)))
	continue;
      if (!VOID_TYPE_P (TREE_TYPE (parms))
	  && complete_type_or_else (TREE_TYPE (parms), parms))
	{
	  relayout_decl (parms);
	  DECL_ARG_TYPE (parms) = type_passed_as (TREE_TYPE (parms));
	  maybe_warn_parm_abi (TREE_TYPE (parms),
			       DECL_SOURCE_LOCATION (parms));
	}
      else
	/* grokparms or complete_type_or_else will have already issued
	   an error.  */
	TREE_TYPE (parms) = error_mark_node;
    }
}

/* Returns nonzero if T is a local variable.  */

int
local_variable_p (const_tree t)
{
  if ((VAR_P (t)
       /* A VAR_DECL with a context that is a _TYPE is a static data
	  member.  */
       && !TYPE_P (CP_DECL_CONTEXT (t))
       /* Any other non-local variable must be at namespace scope.  */
       && !DECL_NAMESPACE_SCOPE_P (t))
      /* PARM_DECLs always count as local variables.  */
      || (TREE_CODE (t) == PARM_DECL))
    return 1;

  return 0;
}

/* Like local_variable_p, but suitable for use as a tree-walking
   function.  Returns the first interesting local variable found,
   skipping artificial declarations other than `this'.  */

static tree
local_variable_p_walkfn (tree *tp, int *walk_subtrees, void * /*data*/)
{
  if (local_variable_p (*tp)
      && (!DECL_ARTIFICIAL (*tp) || DECL_NAME (*tp) == this_identifier))
    return *tp;
  else if (TYPE_P (*tp))
    /* Don't walk into types; they cannot contain local variable
       uses.  */
    *walk_subtrees = 0;

  return NULL_TREE;
}

/* Check that ARG, which is a default-argument expression for a
   parameter DECL, is valid.  Returns ARG, or ERROR_MARK_NODE, if
   something goes wrong.  DECL may also be a _TYPE node, rather than a
   DECL, if there is no DECL available.  */

tree
check_default_argument (tree decl, tree arg, tsubst_flags_t complain)
{
  tree var;
  tree decl_type;

  if (TREE_CODE (arg) == DEFERRED_PARSE)
    /* We get a DEFERRED_PARSE when looking at an in-class declaration
       with a default argument.  Ignore the argument for now; we'll
       deal with it after the class is complete.  */
    return arg;

  if (TYPE_P (decl))
    {
      decl_type = decl;
      decl = NULL_TREE;
    }
  else
    decl_type = TREE_TYPE (decl);

  if (arg == error_mark_node
      || decl == error_mark_node
      || TREE_TYPE (arg) == error_mark_node
      || decl_type == error_mark_node)
    /* Something already went wrong.  There's no need to check
       further.  */
    return error_mark_node;

  /* [dcl.fct.default]

     A default argument expression is implicitly converted to the
     parameter type.  */
  ++cp_unevaluated_operand;
  /* Avoid digest_init clobbering the initializer.  */
  tree carg = BRACE_ENCLOSED_INITIALIZER_P (arg) ? unshare_expr (arg): arg;
  perform_implicit_conversion_flags (decl_type, carg, complain,
				     LOOKUP_IMPLICIT);
  --cp_unevaluated_operand;

  /* Avoid redundant -Wzero-as-null-pointer-constant warnings at
     the call sites.  */
  if (TYPE_PTR_OR_PTRMEM_P (decl_type)
      && null_ptr_cst_p (arg)
      /* Don't lose side-effects as in PR90473.  */
      && !TREE_SIDE_EFFECTS (arg))
    return nullptr_node;

  /* [dcl.fct.default]

     Local variables shall not be used in default argument
     expressions.

     The keyword `this' shall not be used in a default argument of a
     member function.  */
  var = cp_walk_tree_without_duplicates (&arg, local_variable_p_walkfn, NULL);
  if (var)
    {
      if (complain & tf_warning_or_error)
	{
	  if (DECL_NAME (var) == this_identifier)
	    permerror (input_location, "default argument %qE uses %qD",
		       arg, var);
	  else
	    error ("default argument %qE uses local variable %qD", arg, var);
	}
      return error_mark_node;
    }

  /* All is well.  */
  return arg;
}

/* Returns a deprecated type used within TYPE, or NULL_TREE if none.  */

static tree
type_is_deprecated (tree type)
{
  enum tree_code code;
  if (TREE_DEPRECATED (type))
    return type;
  if (TYPE_NAME (type))
    {
      if (TREE_DEPRECATED (TYPE_NAME (type)))
	return type;
      else
	{
	  cp_warn_deprecated_use_scopes (CP_DECL_CONTEXT (TYPE_NAME (type)));
	  return NULL_TREE;
	}
    }

  /* Do warn about using typedefs to a deprecated class.  */
  if (OVERLOAD_TYPE_P (type) && type != TYPE_MAIN_VARIANT (type))
    return type_is_deprecated (TYPE_MAIN_VARIANT (type));

  code = TREE_CODE (type);

  /* Recurse through compound types to find a deprecated component.  */
  if (code == POINTER_TYPE || code == REFERENCE_TYPE
      || code == OFFSET_TYPE || code == FUNCTION_TYPE
      || code == METHOD_TYPE || code == ARRAY_TYPE)
    return type_is_deprecated (TREE_TYPE (type));

  if (TYPE_PTRMEMFUNC_P (type))
    return type_is_deprecated
      (TREE_TYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (type))));

  return NULL_TREE;
}

/* Decode the list of parameter types for a function type.
   Given the list of things declared inside the parens,
   return a list of types.

   If this parameter does not end with an ellipsis, we append
   void_list_node.

   *PARMS is set to the chain of PARM_DECLs created.  */

tree
grokparms (tree parmlist, tree *parms)
{
  tree result = NULL_TREE;
  tree decls = NULL_TREE;
  tree parm;
  int any_error = 0;

  for (parm = parmlist; parm != NULL_TREE; parm = TREE_CHAIN (parm))
    {
      tree type = NULL_TREE;
      tree init = TREE_PURPOSE (parm);
      tree decl = TREE_VALUE (parm);

      if (parm == void_list_node)
	break;

      if (! decl || TREE_TYPE (decl) == error_mark_node)
	continue;

      type = TREE_TYPE (decl);
      if (VOID_TYPE_P (type))
	{
	  if (same_type_p (type, void_type_node)
	      && !init
	      && !DECL_NAME (decl) && !result
	      && TREE_CHAIN (parm) == void_list_node)
	    /* DR 577: A parameter list consisting of a single
	       unnamed parameter of non-dependent type 'void'.  */
	    break;
	  else if (cv_qualified_p (type))
	    error_at (DECL_SOURCE_LOCATION (decl),
		      "invalid use of cv-qualified type %qT in "
		      "parameter declaration", type);
	  else
	    error_at (DECL_SOURCE_LOCATION (decl),
		      "invalid use of type %<void%> in parameter "
		      "declaration");
	  /* It's not a good idea to actually create parameters of
	     type `void'; other parts of the compiler assume that a
	     void type terminates the parameter list.  */
	  type = error_mark_node;
	  TREE_TYPE (decl) = error_mark_node;
	}

      if (type != error_mark_node)
	{
	  if (deprecated_state != DEPRECATED_SUPPRESS)
	    {
	      tree deptype = type_is_deprecated (type);
	      if (deptype)
		cp_warn_deprecated_use (deptype);
	    }

	  /* [dcl.fct] "A parameter with volatile-qualified type is
	     deprecated."  */
	  if (CP_TYPE_VOLATILE_P (type))
	    warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wvolatile,
			"%<volatile%>-qualified parameter is "
			"deprecated");

	  /* Top-level qualifiers on the parameters are
	     ignored for function types.  */
	  type = cp_build_qualified_type (type, 0);
	  if (TREE_CODE (type) == METHOD_TYPE)
	    {
	      error ("parameter %qD invalidly declared method type", decl);
	      type = build_pointer_type (type);
	      TREE_TYPE (decl) = type;
	    }
	  else if (abstract_virtuals_error (decl, type))
	    any_error = 1;  /* Seems like a good idea.  */
	  else if (cxx_dialect < cxx17 && INDIRECT_TYPE_P (type))
	    {
	      /* Before C++17 DR 393:
		 [dcl.fct]/6, parameter types cannot contain pointers
		 (references) to arrays of unknown bound.  */
	      tree t = TREE_TYPE (type);
	      int ptr = TYPE_PTR_P (type);

	      while (1)
		{
		  if (TYPE_PTR_P (t))
		    ptr = 1;
		  else if (TREE_CODE (t) != ARRAY_TYPE)
		    break;
		  else if (!TYPE_DOMAIN (t))
		    break;
		  t = TREE_TYPE (t);
		}
	      if (TREE_CODE (t) == ARRAY_TYPE)
		pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wpedantic,
			 ptr
			 ? G_("parameter %qD includes pointer to array of "
			      "unknown bound %qT")
			 : G_("parameter %qD includes reference to array of "
			      "unknown bound %qT"),
			 decl, t);
	    }

	  if (any_error)
	    init = NULL_TREE;
	  else if (init && !processing_template_decl)
	    init = check_default_argument (decl, init, tf_warning_or_error);
	}

      DECL_CHAIN (decl) = decls;
      decls = decl;
      result = tree_cons (init, type, result);
    }
  decls = nreverse (decls);
  result = nreverse (result);
  /* PARM is non-null here only when the list ended with an ellipsis;
     otherwise terminate the type list with void_list_node.  */
  if (parm)
    result = chainon (result, void_list_node);
  *parms = decls;

  return result;
}

/* D is a constructor or overloaded `operator='.

   Let T be the class in which D is declared.
Then, this function returns:

   -1 if D is an ill-formed constructor or copy assignment operator
      whose first parameter is of type `T'.
   0  if D is not a copy constructor or copy assignment
      operator.
   1  if D is a copy constructor or copy assignment operator whose
      first parameter is a reference to non-const qualified T.
   2  if D is a copy constructor or copy assignment operator whose
      first parameter is a reference to const qualified T.

   This function can be used as a predicate.  Positive values indicate
   a copy constructor and nonzero values indicate a copy assignment
   operator.  */

int
copy_fn_p (const_tree d)
{
  tree args;
  tree arg_type;
  int result = 1;

  gcc_assert (DECL_FUNCTION_MEMBER_P (d));

  if (TREE_CODE (d) == TEMPLATE_DECL
      || (DECL_TEMPLATE_INFO (d)
	  && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (d))))
    /* Instantiations of template member functions are never copy
       functions.  Note that member functions of templated classes are
       represented as template functions internally, and we must
       accept those as copy functions.  */
    return 0;

  args = FUNCTION_FIRST_USER_PARMTYPE (d);
  if (!args)
    return 0;

  arg_type = TREE_VALUE (args);
  if (arg_type == error_mark_node)
    return 0;

  if (TYPE_MAIN_VARIANT (arg_type) == DECL_CONTEXT (d))
    {
      /* Pass by value copy assignment operator.  */
      result = -1;
    }
  else if (TYPE_REF_P (arg_type)
	   && !TYPE_REF_IS_RVALUE (arg_type)
	   && TYPE_MAIN_VARIANT (TREE_TYPE (arg_type)) == DECL_CONTEXT (d))
    {
      if (CP_TYPE_CONST_P (TREE_TYPE (arg_type)))
	result = 2;
    }
  else
    return 0;

  args = TREE_CHAIN (args);

  if (args && args != void_list_node && !TREE_PURPOSE (args))
    /* There are more non-optional args.  */
    return 0;

  return result;
}

/* D is a constructor or overloaded `operator='.

   Let T be the class in which D is declared.  Then, this function
   returns true when D is a move constructor or move assignment
   operator, false otherwise.  */

bool
move_fn_p (const_tree d)
{
  gcc_assert (DECL_FUNCTION_MEMBER_P (d));

  if (cxx_dialect == cxx98)
    /* There are no move constructors if we are in C++98 mode.  */
    return false;

  if (TREE_CODE (d) == TEMPLATE_DECL
      || (DECL_TEMPLATE_INFO (d)
	  && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (d))))
    /* Instantiations of template member functions are never move
       functions.  Note that member functions of templated classes are
       represented as template functions internally, and we must
       accept those as move functions.  */
    return 0;

  return move_signature_fn_p (d);
}

/* D is a constructor or overloaded `operator='.

   Then, this function returns true when D has the same signature as a
   move constructor or move assignment operator (because either it is
   such a ctor/op= or it is a template specialization with the same
   signature), false otherwise.  */

bool
move_signature_fn_p (const_tree d)
{
  tree args;
  tree arg_type;
  bool result = false;

  args = FUNCTION_FIRST_USER_PARMTYPE (d);
  if (!args)
    return 0;

  arg_type = TREE_VALUE (args);
  if (arg_type == error_mark_node)
    return 0;

  /* A move signature takes an rvalue reference to the class itself as
     its first user-written parameter.  */
  if (TYPE_REF_P (arg_type)
      && TYPE_REF_IS_RVALUE (arg_type)
      && same_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (arg_type)),
		      DECL_CONTEXT (d)))
    result = true;

  args = TREE_CHAIN (args);

  if (args && args != void_list_node && !TREE_PURPOSE (args))
    /* There are more non-optional args.  */
    return false;

  return result;
}

/* Remember any special properties of member function DECL.
*/ void grok_special_member_properties (tree decl) { tree class_type; if (TREE_CODE (decl) == USING_DECL || !DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)) return; class_type = DECL_CONTEXT (decl); if (IDENTIFIER_CTOR_P (DECL_NAME (decl))) { int ctor = copy_fn_p (decl); if (!DECL_ARTIFICIAL (decl)) TYPE_HAS_USER_CONSTRUCTOR (class_type) = 1; if (ctor > 0) { /* [class.copy] A non-template constructor for class X is a copy constructor if its first parameter is of type X&, const X&, volatile X& or const volatile X&, and either there are no other parameters or else all other parameters have default arguments. */ TYPE_HAS_COPY_CTOR (class_type) = 1; if (ctor > 1) TYPE_HAS_CONST_COPY_CTOR (class_type) = 1; } else if (sufficient_parms_p (FUNCTION_FIRST_USER_PARMTYPE (decl))) TYPE_HAS_DEFAULT_CONSTRUCTOR (class_type) = 1; else if (is_list_ctor (decl)) TYPE_HAS_LIST_CTOR (class_type) = 1; if (DECL_DECLARED_CONSTEXPR_P (decl) && !ctor && !move_fn_p (decl)) TYPE_HAS_CONSTEXPR_CTOR (class_type) = 1; } else if (DECL_NAME (decl) == assign_op_identifier) { /* [class.copy] A non-template assignment operator for class X is a copy assignment operator if its parameter is of type X, X&, const X&, volatile X& or const volatile X&. */ int assop = copy_fn_p (decl); if (assop) { TYPE_HAS_COPY_ASSIGN (class_type) = 1; if (assop != 1) TYPE_HAS_CONST_COPY_ASSIGN (class_type) = 1; } } else if (IDENTIFIER_CONV_OP_P (DECL_NAME (decl))) TYPE_HAS_CONVERSION (class_type) = true; /* Destructors are handled in check_methods. */ } /* Check a constructor DECL has the correct form. Complains if the class has a constructor of the form X(X). */ bool grok_ctor_properties (const_tree ctype, const_tree decl) { int ctor_parm = copy_fn_p (decl); if (ctor_parm < 0) { /* [class.copy] A declaration of a constructor for a class X is ill-formed if its first parameter is of type (optionally cv-qualified) X and either there are no other parameters or else all other parameters have default arguments. 
We *don't* complain about member template instantiations that have this form, though; they can occur as we try to decide what constructor to use during overload resolution. Since overload resolution will never prefer such a constructor to the non-template copy constructor (which is either explicitly or implicitly defined), there's no need to worry about their existence. Theoretically, they should never even be instantiated, but that's hard to forestall. */ error_at (DECL_SOURCE_LOCATION (decl), "invalid constructor; you probably meant %<%T (const %T&)%>", ctype, ctype); return false; } return true; } /* DECL is a declaration for an overloaded or conversion operator. If COMPLAIN is true, errors are issued for invalid declarations. */ bool grok_op_properties (tree decl, bool complain) { tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (decl)); bool methodp = TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE; tree name = DECL_NAME (decl); location_t loc = DECL_SOURCE_LOCATION (decl); tree class_type = DECL_CONTEXT (decl); if (class_type && !CLASS_TYPE_P (class_type)) class_type = NULL_TREE; tree_code operator_code; unsigned op_flags; if (IDENTIFIER_CONV_OP_P (name)) { /* Conversion operators are TYPE_EXPR for the purposes of this function. */ operator_code = TYPE_EXPR; op_flags = OVL_OP_FLAG_UNARY; } else { const ovl_op_info_t *ovl_op = IDENTIFIER_OVL_OP_INFO (name); operator_code = ovl_op->tree_code; op_flags = ovl_op->flags; gcc_checking_assert (operator_code != ERROR_MARK); DECL_OVERLOADED_OPERATOR_CODE_RAW (decl) = ovl_op->ovl_op_code; } if (op_flags & OVL_OP_FLAG_ALLOC) { /* operator new and operator delete are quite special. 
*/ if (class_type) switch (op_flags) { case OVL_OP_FLAG_ALLOC: TYPE_HAS_NEW_OPERATOR (class_type) = 1; break; case OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE: TYPE_GETS_DELETE (class_type) |= 1; break; case OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_VEC: TYPE_HAS_ARRAY_NEW_OPERATOR (class_type) = 1; break; case OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE | OVL_OP_FLAG_VEC: TYPE_GETS_DELETE (class_type) |= 2; break; default: gcc_unreachable (); } /* [basic.std.dynamic.allocation]/1: A program is ill-formed if an allocation function is declared in a namespace scope other than global scope or declared static in global scope. The same also holds true for deallocation functions. */ if (DECL_NAMESPACE_SCOPE_P (decl)) { if (CP_DECL_CONTEXT (decl) != global_namespace) { error_at (loc, "%qD may not be declared within a namespace", decl); return false; } if (!TREE_PUBLIC (decl)) { error_at (loc, "%qD may not be declared as static", decl); return false; } } if (op_flags & OVL_OP_FLAG_DELETE) { DECL_SET_IS_OPERATOR_DELETE (decl, true); coerce_delete_type (decl, loc); } else { DECL_SET_IS_OPERATOR_NEW (decl, true); TREE_TYPE (decl) = coerce_new_type (TREE_TYPE (decl), loc); } return true; } /* An operator function must either be a non-static member function or have at least one parameter of a class, a reference to a class, an enumeration, or a reference to an enumeration. 13.4.0.6 */ if (! 
methodp || DECL_STATIC_FUNCTION_P (decl)) { if (operator_code == TYPE_EXPR || operator_code == CALL_EXPR || operator_code == COMPONENT_REF || operator_code == ARRAY_REF || operator_code == NOP_EXPR) { error_at (loc, "%qD must be a nonstatic member function", decl); return false; } if (DECL_STATIC_FUNCTION_P (decl)) { error_at (loc, "%qD must be either a non-static member " "function or a non-member function", decl); return false; } for (tree arg = argtypes; ; arg = TREE_CHAIN (arg)) { if (!arg || arg == void_list_node) { if (complain) error_at(loc, "%qD must have an argument of class or " "enumerated type", decl); return false; } tree type = non_reference (TREE_VALUE (arg)); if (type == error_mark_node) return false; /* MAYBE_CLASS_TYPE_P, rather than CLASS_TYPE_P, is used because these checks are performed even on template functions. */ if (MAYBE_CLASS_TYPE_P (type) || TREE_CODE (type) == ENUMERAL_TYPE) break; } } if (operator_code == CALL_EXPR) /* There are no further restrictions on the arguments to an overloaded "operator ()". */ return true; if (operator_code == COND_EXPR) { /* 13.4.0.3 */ error_at (loc, "ISO C++ prohibits overloading %<operator ?:%>"); return false; } /* Count the number of arguments and check for ellipsis. */ int arity = 0; for (tree arg = argtypes; arg != void_list_node; arg = TREE_CHAIN (arg)) { if (!arg) { /* Variadic. */ error_at (loc, "%qD must not have variable number of arguments", decl); return false; } ++arity; } /* Verify correct number of arguments. */ switch (op_flags) { case OVL_OP_FLAG_AMBIARY: if (arity == 1) { /* We have a unary instance of an ambi-ary op. Remap to the unary one. 
*/ unsigned alt = ovl_op_alternate[ovl_op_mapping [operator_code]]; const ovl_op_info_t *ovl_op = &ovl_op_info[false][alt]; gcc_checking_assert (ovl_op->flags == OVL_OP_FLAG_UNARY); operator_code = ovl_op->tree_code; DECL_OVERLOADED_OPERATOR_CODE_RAW (decl) = ovl_op->ovl_op_code; } else if (arity != 2) { /* This was an ambiguous operator but is invalid. */ error_at (loc, methodp ? G_("%qD must have either zero or one argument") : G_("%qD must have either one or two arguments"), decl); return false; } else if ((operator_code == POSTINCREMENT_EXPR || operator_code == POSTDECREMENT_EXPR) && ! processing_template_decl /* x++ and x--'s second argument must be an int. */ && ! same_type_p (TREE_VALUE (TREE_CHAIN (argtypes)), integer_type_node)) { error_at (loc, methodp ? G_("postfix %qD must have %<int%> as its argument") : G_("postfix %qD must have %<int%> as its second argument"), decl); return false; } break; case OVL_OP_FLAG_UNARY: if (arity != 1) { error_at (loc, methodp ? G_("%qD must have no arguments") : G_("%qD must have exactly one argument"), decl); return false; } break; case OVL_OP_FLAG_BINARY: if (arity != 2) { error_at (loc, methodp ? G_("%qD must have exactly one argument") : G_("%qD must have exactly two arguments"), decl); return false; } break; default: gcc_unreachable (); } /* There can be no default arguments. */ for (tree arg = argtypes; arg != void_list_node; arg = TREE_CHAIN (arg)) if (TREE_PURPOSE (arg)) { TREE_PURPOSE (arg) = NULL_TREE; error_at (loc, "%qD cannot have default arguments", decl); return false; } /* At this point the declaration is well-formed. It may not be sensible though. */ /* Check member function warnings only on the in-class declaration. There's no point warning on an out-of-class definition. */ if (class_type && class_type != current_class_type) return true; /* Warn about conversion operators that will never be used. */ if (IDENTIFIER_CONV_OP_P (name) && ! 
DECL_TEMPLATE_INFO (decl) && warn_class_conversion) { tree t = TREE_TYPE (name); int ref = TYPE_REF_P (t); if (ref) t = TYPE_MAIN_VARIANT (TREE_TYPE (t)); if (VOID_TYPE_P (t)) warning_at (loc, OPT_Wclass_conversion, "converting %qT to %<void%> " "will never use a type conversion operator", class_type); else if (class_type) { if (same_type_ignoring_top_level_qualifiers_p (t, class_type)) warning_at (loc, OPT_Wclass_conversion, ref ? G_("converting %qT to a reference to the same type " "will never use a type conversion operator") : G_("converting %qT to the same type " "will never use a type conversion operator"), class_type); /* Don't force t to be complete here. */ else if (MAYBE_CLASS_TYPE_P (t) && COMPLETE_TYPE_P (t) && DERIVED_FROM_P (t, class_type)) warning_at (loc, OPT_Wclass_conversion, ref ? G_("converting %qT to a reference to a base class " "%qT will never use a type conversion operator") : G_("converting %qT to a base class %qT " "will never use a type conversion operator"), class_type, t); } } if (!warn_ecpp) return true; /* Effective C++ rules below. */ /* More Effective C++ rule 7. */ if (operator_code == TRUTH_ANDIF_EXPR || operator_code == TRUTH_ORIF_EXPR || operator_code == COMPOUND_EXPR) warning_at (loc, OPT_Weffc__, "user-defined %qD always evaluates both arguments", decl); /* More Effective C++ rule 6. 
*/
  if (operator_code == POSTINCREMENT_EXPR
      || operator_code == POSTDECREMENT_EXPR
      || operator_code == PREINCREMENT_EXPR
      || operator_code == PREDECREMENT_EXPR)
    {
      tree arg = TREE_VALUE (argtypes);
      tree ret = TREE_TYPE (TREE_TYPE (decl));
      if (methodp || TYPE_REF_P (arg))
	arg = TREE_TYPE (arg);
      arg = TYPE_MAIN_VARIANT (arg);
      if (operator_code == PREINCREMENT_EXPR
	  || operator_code == PREDECREMENT_EXPR)
	{
	  /* Prefix ++/-- is expected to return a reference to the
	     operand type.  */
	  if (!TYPE_REF_P (ret)
	      || !same_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (ret)), arg))
	    warning_at (loc, OPT_Weffc__, "prefix %qD should return %qT",
			decl, build_reference_type (arg));
	}
      else
	{
	  /* Postfix ++/-- is expected to return the operand type by
	     value.  */
	  if (!same_type_p (TYPE_MAIN_VARIANT (ret), arg))
	    warning_at (loc, OPT_Weffc__,
			"postfix %qD should return %qT", decl, arg);
	}
    }

  /* Effective C++ rule 23.  */
  if (!DECL_ASSIGNMENT_OPERATOR_P (decl)
      && (operator_code == PLUS_EXPR
	  || operator_code == MINUS_EXPR
	  || operator_code == TRUNC_DIV_EXPR
	  || operator_code == MULT_EXPR
	  || operator_code == TRUNC_MOD_EXPR)
      && TYPE_REF_P (TREE_TYPE (TREE_TYPE (decl))))
    warning_at (loc, OPT_Weffc__, "%qD should return by value", decl);

  return true;
}

/* Return a string giving the keyword associated with CODE.  */

static const char *
tag_name (enum tag_types code)
{
  switch (code)
    {
    case record_type:
      return "struct";
    case class_type:
      return "class";
    case union_type:
      return "union";
    case enum_type:
      return "enum";
    case typename_type:
      return "typename";
    default:
      gcc_unreachable ();
    }
}

/* Name lookup in an elaborated-type-specifier (after the keyword
   indicated by TAG_CODE) has found the TYPE_DECL DECL.  If the
   elaborated-type-specifier is invalid, issue a diagnostic and return
   error_mark_node; otherwise, return the *_TYPE to which it referred.
   If ALLOW_TEMPLATE_P is true, TYPE may be a class template.  */

tree
check_elaborated_type_specifier (enum tag_types tag_code,
				 tree decl,
				 bool allow_template_p)
{
  tree type;

  /* In the case of:

       struct S { struct S *p; };

     name lookup will find the TYPE_DECL for the implicit "S::S"
     typedef.  Adjust for that here.  */
  if (DECL_SELF_REFERENCE_P (decl))
    decl = TYPE_NAME (TREE_TYPE (decl));

  type = TREE_TYPE (decl);

  /* Check TEMPLATE_TYPE_PARM first because DECL_IMPLICIT_TYPEDEF_P
     is false for this case as well.  */
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM)
    {
      error ("using template type parameter %qT after %qs",
	     type, tag_name (tag_code));
      return error_mark_node;
    }
  /* Accept template template parameters.  */
  else if (allow_template_p
	   && (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM
	       || TREE_CODE (type) == TEMPLATE_TEMPLATE_PARM))
    ;
  /* [dcl.type.elab]

     If the identifier resolves to a typedef-name or the
     simple-template-id resolves to an alias template specialization,
     the elaborated-type-specifier is ill-formed.

     In other words, the only legitimate declaration to use in the
     elaborated type specifier is the implicit typedef created when
     the type is declared.  */
  else if (!DECL_IMPLICIT_TYPEDEF_P (decl)
	   && !DECL_SELF_REFERENCE_P (decl)
	   && tag_code != typename_type)
    {
      if (alias_template_specialization_p (type, nt_opaque))
	error ("using alias template specialization %qT after %qs",
	       type, tag_name (tag_code));
      else
	error ("using typedef-name %qD after %qs", decl,
	       tag_name (tag_code));
      inform (DECL_SOURCE_LOCATION (decl),
	      "%qD has a previous declaration here", decl);
      return error_mark_node;
    }
  else if (TREE_CODE (type) != RECORD_TYPE
	   && TREE_CODE (type) != UNION_TYPE
	   && tag_code != enum_type
	   && tag_code != typename_type)
    {
      error ("%qT referred to as %qs", type, tag_name (tag_code));
      inform (location_of (type),
	      "%qT has a previous declaration here", type);
      return error_mark_node;
    }
  else if (TREE_CODE (type) != ENUMERAL_TYPE
	   && tag_code == enum_type)
    {
      error ("%qT referred to as enum", type);
      inform (location_of (type),
	      "%qT has a previous declaration here", type);
      return error_mark_node;
    }
  else if (!allow_template_p
	   && TREE_CODE (type) == RECORD_TYPE
	   && CLASSTYPE_IS_TEMPLATE (type))
    {
      /* If a class template appears as elaborated type specifier
	 without a template header such as:

	   template <class T> class C {};
	   void f(class C);		// No template header here

	 then the required template argument is missing.  */
      error ("template argument required for %<%s %T%>",
	     tag_name (tag_code),
	     DECL_NAME (CLASSTYPE_TI_TEMPLATE (type)));
      return error_mark_node;
    }

  return type;
}

/* Lookup NAME in elaborate type specifier in scope according to
   SCOPE and issue diagnostics if necessary.
   Return *_TYPE node upon success, NULL_TREE when the NAME is not
   found, and ERROR_MARK_NODE for type error.  */

static tree
lookup_and_check_tag (enum tag_types tag_code, tree name,
		      tag_scope scope, bool template_header_p)
{
  tree t;
  tree decl;
  if (scope == ts_global)
    {
      /* First try ordinary name lookup, ignoring hidden class name
	 injected via friend declaration.  */
      decl = lookup_name_prefer_type (name, 2);
      decl = strip_using_decl (decl);
      /* If that fails, the name will be placed in the smallest
	 non-class, non-function-prototype scope according to 3.3.1/5.
	 We may already have a hidden name declared as friend in this
	 scope.  So lookup again but not ignoring hidden names.
	 If we find one, that name will be made visible rather than
	 creating a new tag.  */
      if (!decl)
	decl = lookup_type_scope (name, ts_within_enclosing_non_class);
    }
  else
    decl = lookup_type_scope (name, scope);

  /* Strip the template wrapper so the checks below look at the
     underlying TYPE_DECL.  */
  if (decl
      && (DECL_CLASS_TEMPLATE_P (decl)
	  /* If scope is ts_current we're defining a class, so ignore a
	     template template parameter.  */
	  || (scope != ts_current
	      && DECL_TEMPLATE_TEMPLATE_PARM_P (decl))))
    decl = DECL_TEMPLATE_RESULT (decl);

  if (decl && TREE_CODE (decl) == TYPE_DECL)
    {
      /* Look for invalid nested type:
	   class C {
	     class C {};
	   };  */
      if (scope == ts_current && DECL_SELF_REFERENCE_P (decl))
	{
	  error ("%qD has the same name as the class in which it is "
		 "declared", decl);
	  return error_mark_node;
	}

      /* Two cases we need to consider when deciding if a class
	 template is allowed as an elaborated type specifier:
	 1. It is a self reference to its own class.
	 2. It comes with a template header.

	 For example:

	   template <class T> class C {
	     class C *c1;		// DECL_SELF_REFERENCE_P is true
	     class D;
	   };
	   template <class U> class C; // template_header_p is true
	   template <class T> class C<T>::D {
	     class C *c2;		// DECL_SELF_REFERENCE_P is true
	   };  */

      t = check_elaborated_type_specifier (tag_code, decl,
					   template_header_p
					   | DECL_SELF_REFERENCE_P (decl));
      if (template_header_p && t && CLASS_TYPE_P (t)
	  && (!CLASSTYPE_TEMPLATE_INFO (t)
	      || (!PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t)))))
	{
	  error ("%qT is not a template", t);
	  inform (location_of (t), "previous declaration here");
	  if (TYPE_CLASS_SCOPE_P (t)
	      && CLASSTYPE_TEMPLATE_INFO (TYPE_CONTEXT (t)))
	    inform (input_location,
		    "perhaps you want to explicitly add %<%T::%>",
		    TYPE_CONTEXT (t));
	  t = error_mark_node;
	}

      return t;
    }
  else if (decl && TREE_CODE (decl) == TREE_LIST)
    {
      /* An ambiguous lookup result (multiple candidates).  */
      error ("reference to %qD is ambiguous", name);
      print_candidates (decl);
      return error_mark_node;
    }
  else
    return NULL_TREE;
}

/* Get the struct, enum or union (TAG_CODE says which) with tag NAME.
   Define the tag as a forward-reference if it is not defined.

   If a declaration is given, process it here, and report an error if
   multiple declarations are not identical.

   SCOPE is TS_CURRENT when this is also a definition.  Only look in
   the current frame for the name (since C++ allows new names in any
   scope.)  It is TS_WITHIN_ENCLOSING_NON_CLASS if this is a friend
   declaration.  Only look beginning from the current scope outward up
   till the nearest non-class scope.  Otherwise it is TS_GLOBAL.

   TEMPLATE_HEADER_P is true when this declaration is preceded by
   a set of template parameters.
*/

static tree
xref_tag_1 (enum tag_types tag_code, tree name,
	    tag_scope scope, bool template_header_p)
{
  enum tree_code code;
  tree context = NULL_TREE;

  gcc_assert (identifier_p (name));

  /* Map the parser-level tag kind to the tree code of the type node
     we would create for it.  */
  switch (tag_code)
    {
    case record_type:
    case class_type:
      code = RECORD_TYPE;
      break;
    case union_type:
      code = UNION_TYPE;
      break;
    case enum_type:
      code = ENUMERAL_TYPE;
      break;
    default:
      gcc_unreachable ();
    }

  /* In case of anonymous name, xref_tag is only called to
     make type node and push name.  Name lookup is not required.  */
  tree t = NULL_TREE;
  if (scope != ts_lambda && !IDENTIFIER_ANON_P (name))
    t = lookup_and_check_tag (tag_code, name, scope, template_header_p);

  if (t == error_mark_node)
    return error_mark_node;

  if (scope != ts_current && t && current_class_type
      && template_class_depth (current_class_type)
      && template_header_p)
    {
      if (TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM)
	return t;

      /* Since SCOPE is not TS_CURRENT, we are not looking at a
	 definition of this tag.  Since, in addition, we are currently
	 processing a (member) template declaration of a template
	 class, we must be very careful; consider:

	   template <class X> struct S1

	   template <class U> struct S2
	   {
	     template <class V> friend struct S1;
	   };

	 Here, the S2::S1 declaration should not be confused with the
	 outer declaration.  In particular, the inner version should
	 have a template parameter of level 2, not level 1.

	 On the other hand, when presented with:

	   template <class T> struct S1
	   {
	     template <class U> struct S2 {};
	     template <class U> friend struct S2;
	   };

	 the friend must find S1::S2 eventually.  We accomplish this
	 by making sure that the new type we create to represent this
	 declaration has the right TYPE_CONTEXT.  */
      context = TYPE_CONTEXT (t);
      t = NULL_TREE;
    }

  if (! t)
    {
      /* If no such tag is yet defined, create a forward-reference node
	 and record it as the "definition".
	 When a real declaration of this type is found, the
	 forward-reference will be altered into a real type.  */
      if (code == ENUMERAL_TYPE)
	{
	  error ("use of enum %q#D without previous declaration", name);
	  return error_mark_node;
	}
      else
	{
	  t = make_class_type (code);
	  TYPE_CONTEXT (t) = context;
	  if (scope == ts_lambda)
	    {
	      /* Mark it as a lambda type.  */
	      CLASSTYPE_LAMBDA_EXPR (t) = error_mark_node;
	      /* And push it into current scope.  */
	      scope = ts_current;
	    }
	  t = pushtag (name, t, scope);
	}
    }
  else
    {
      if (template_header_p && MAYBE_CLASS_TYPE_P (t))
	{
	  /* Check that we aren't trying to overload a class with
	     different constraints.  */
	  tree constr = NULL_TREE;
	  if (current_template_parms)
	    {
	      tree reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms);
	      constr = build_constraints (reqs, NULL_TREE);
	    }
	  if (!redeclare_class_template (t, current_template_parms, constr))
	    return error_mark_node;
	}
      else if (!processing_template_decl
	       && CLASS_TYPE_P (t)
	       && CLASSTYPE_IS_TEMPLATE (t))
	{
	  error ("redeclaration of %qT as a non-template", t);
	  inform (location_of (t), "previous declaration %qD", t);
	  return error_mark_node;
	}

      if (scope != ts_within_enclosing_non_class && TYPE_HIDDEN_P (t))
	{
	  /* This is no longer an invisible friend.  Make it visible.  */
	  tree decl = TYPE_NAME (t);
	  DECL_ANTICIPATED (decl) = false;
	  DECL_FRIEND_P (decl) = false;

	  if (TYPE_TEMPLATE_INFO (t))
	    {
	      tree tmpl = TYPE_TI_TEMPLATE (t);
	      DECL_ANTICIPATED (tmpl) = false;
	      DECL_FRIEND_P (tmpl) = false;
	    }
	}
    }

  return t;
}

/* Wrapper for xref_tag_1.  */

tree
xref_tag (enum tag_types tag_code, tree name,
	  tag_scope scope, bool template_header_p)
{
  tree ret;
  bool subtime;
  /* Attribute the lookup to the name-lookup timevar.  */
  subtime = timevar_cond_start (TV_NAME_LOOKUP);
  ret = xref_tag_1 (tag_code, name, scope, template_header_p);
  timevar_cond_stop (TV_NAME_LOOKUP, subtime);
  return ret;
}

tree
xref_tag_from_type (tree old, tree id, tag_scope scope)
{
  enum tag_types tag_kind;

  if (TREE_CODE (old) == RECORD_TYPE)
    tag_kind = (CLASSTYPE_DECLARED_CLASS (old) ?
class_type : record_type);
  else
    tag_kind = union_type;

  if (id == NULL_TREE)
    id = TYPE_IDENTIFIER (old);

  return xref_tag (tag_kind, id, scope, false);
}

/* Create the binfo hierarchy for REF with (possibly NULL) base list
   BASE_LIST.  For each element on BASE_LIST the TREE_PURPOSE is an
   access_* node, and the TREE_VALUE is the type of the base-class.
   Non-NULL TREE_TYPE indicates virtual inheritance.  */

void
xref_basetypes (tree ref, tree base_list)
{
  tree *basep;
  tree binfo, base_binfo;
  unsigned max_vbases = 0;   /* Maximum direct & indirect virtual bases.  */
  unsigned max_bases = 0;    /* Maximum direct bases.  */
  unsigned max_dvbases = 0;  /* Maximum direct virtual bases.  */
  int i;
  tree default_access;
  tree igo_prev;	     /* Track Inheritance Graph Order.  */

  if (ref == error_mark_node)
    return;

  /* The base of a derived class is private by default, all others are
     public.  */
  default_access = (TREE_CODE (ref) == RECORD_TYPE
		    && CLASSTYPE_DECLARED_CLASS (ref)
		    ? access_private_node : access_public_node);

  /* First, make sure that any templates in base-classes are
     instantiated.  This ensures that if we call ourselves recursively
     we do not get confused about which classes are marked and which
     are not.  */
  basep = &base_list;
  while (*basep)
    {
      tree basetype = TREE_VALUE (*basep);

      /* The dependent_type_p call below should really be dependent_scope_p
	 so that we give a hard error about using an incomplete type as a
	 base, but we allow it with a pedwarn for backward compatibility.  */
      if (processing_template_decl
	  && CLASS_TYPE_P (basetype) && TYPE_BEING_DEFINED (basetype))
	cxx_incomplete_type_diagnostic (NULL_TREE, basetype, DK_PEDWARN);
      if (!dependent_type_p (basetype)
	  && !complete_type_or_else (basetype, NULL))
	/* An incomplete type.  Remove it from the list.  */
	*basep = TREE_CHAIN (*basep);
      else
	{
	  max_bases++;
	  if (TREE_TYPE (*basep))
	    max_dvbases++;
	  if (CLASS_TYPE_P (basetype))
	    max_vbases += vec_safe_length (CLASSTYPE_VBASECLASSES (basetype));
	  basep = &TREE_CHAIN (*basep);
	}
    }
  max_vbases += max_dvbases;

  TYPE_MARKED_P (ref) = 1;

  /* The binfo slot should be empty, unless this is an (ill-formed)
     redefinition.  */
  gcc_assert (!TYPE_BINFO (ref) || TYPE_SIZE (ref));

  gcc_assert (TYPE_MAIN_VARIANT (ref) == ref);

  binfo = make_tree_binfo (max_bases);

  TYPE_BINFO (ref) = binfo;
  BINFO_OFFSET (binfo) = size_zero_node;
  BINFO_TYPE (binfo) = ref;

  /* Apply base-class info set up to the variants of this type.  */
  fixup_type_variants (ref);

  if (max_bases)
    {
      vec_alloc (BINFO_BASE_ACCESSES (binfo), max_bases);
      /* A C++98 POD cannot have base classes.  */
      CLASSTYPE_NON_LAYOUT_POD_P (ref) = true;
      if (TREE_CODE (ref) == UNION_TYPE)
	{
	  error ("derived union %qT invalid", ref);
	  return;
	}
    }

  if (max_bases > 1)
    warning (OPT_Wmultiple_inheritance,
	     "%qT defined with multiple direct bases", ref);

  if (max_vbases)
    {
      /* An aggregate can't have virtual base classes.  */
      CLASSTYPE_NON_AGGREGATE (ref) = true;

      vec_alloc (CLASSTYPE_VBASECLASSES (ref), max_vbases);

      if (max_dvbases)
	warning (OPT_Wvirtual_inheritance,
		 "%qT defined with direct virtual base", ref);
    }

  for (igo_prev = binfo; base_list; base_list = TREE_CHAIN (base_list))
    {
      tree access = TREE_PURPOSE (base_list);
      int via_virtual = TREE_TYPE (base_list) != NULL_TREE;
      tree basetype = TREE_VALUE (base_list);

      if (access == access_default_node)
	access = default_access;

      /* Before C++17, an aggregate cannot have base classes.  In C++17,
	 an aggregate can't have virtual, private, or protected base
	 classes.  */
      if (cxx_dialect < cxx17
	  || access != access_public_node
	  || via_virtual)
	CLASSTYPE_NON_AGGREGATE (ref) = true;

      if (PACK_EXPANSION_P (basetype))
	basetype = PACK_EXPANSION_PATTERN (basetype);
      if (TREE_CODE (basetype) == TYPE_DECL)
	basetype = TREE_TYPE (basetype);
      if (!MAYBE_CLASS_TYPE_P (basetype)
	  || TREE_CODE (basetype) == UNION_TYPE)
	{
	  error ("base type %qT fails to be a struct or class type",
		 basetype);
	  goto dropped_base;
	}

      base_binfo = NULL_TREE;
      if (CLASS_TYPE_P (basetype) && !dependent_scope_p (basetype))
	{
	  base_binfo = TYPE_BINFO (basetype);
	  /* The original basetype could have been a typedef'd type.  */
	  basetype = BINFO_TYPE (base_binfo);

	  /* Inherit flags from the base.  */
	  TYPE_HAS_NEW_OPERATOR (ref)
	    |= TYPE_HAS_NEW_OPERATOR (basetype);
	  TYPE_HAS_ARRAY_NEW_OPERATOR (ref)
	    |= TYPE_HAS_ARRAY_NEW_OPERATOR (basetype);
	  TYPE_GETS_DELETE (ref) |= TYPE_GETS_DELETE (basetype);
	  TYPE_HAS_CONVERSION (ref) |= TYPE_HAS_CONVERSION (basetype);
	  CLASSTYPE_DIAMOND_SHAPED_P (ref)
	    |= CLASSTYPE_DIAMOND_SHAPED_P (basetype);
	  CLASSTYPE_REPEATED_BASE_P (ref)
	    |= CLASSTYPE_REPEATED_BASE_P (basetype);
	}

      /* We must do this test after we've seen through a typedef
	 type.  */
      if (TYPE_MARKED_P (basetype))
	{
	  if (basetype == ref)
	    error ("recursive type %qT undefined", basetype);
	  else
	    error ("duplicate base type %qT invalid", basetype);
	  goto dropped_base;
	}

      if (PACK_EXPANSION_P (TREE_VALUE (base_list)))
	/* Regenerate the pack expansion for the bases.  */
	basetype = make_pack_expansion (basetype);

      TYPE_MARKED_P (basetype) = 1;

      base_binfo = copy_binfo (base_binfo, basetype, ref,
			       &igo_prev, via_virtual);
      if (!BINFO_INHERITANCE_CHAIN (base_binfo))
	BINFO_INHERITANCE_CHAIN (base_binfo) = binfo;

      BINFO_BASE_APPEND (binfo, base_binfo);
      BINFO_BASE_ACCESS_APPEND (binfo, access);
      continue;

    dropped_base:
      /* Update max_vbases to reflect the reality that we are dropping
	 this base:  if it reaches zero we want to undo the vec_alloc
	 above to avoid inconsistencies during error-recovery: eg, in
	 build_special_member_call, CLASSTYPE_VBASECLASSES non null
	 and vtt null (c++/27952).  */
      if (via_virtual)
	max_vbases--;
      if (CLASS_TYPE_P (basetype))
	max_vbases -= vec_safe_length (CLASSTYPE_VBASECLASSES (basetype));
    }

  if (CLASSTYPE_VBASECLASSES (ref)
      && max_vbases == 0)
    vec_free (CLASSTYPE_VBASECLASSES (ref));

  if (vec_safe_length (CLASSTYPE_VBASECLASSES (ref)) < max_vbases)
    /* If we didn't get max_vbases vbases, we must have shared at
       least one of them, and are therefore diamond shaped.  */
    CLASSTYPE_DIAMOND_SHAPED_P (ref) = 1;

  /* Unmark all the types.  */
  for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
    TYPE_MARKED_P (BINFO_TYPE (base_binfo)) = 0;
  TYPE_MARKED_P (ref) = 0;

  /* Now see if we have a repeated base type.  */
  if (!CLASSTYPE_REPEATED_BASE_P (ref))
    {
      for (base_binfo = binfo; base_binfo;
	   base_binfo = TREE_CHAIN (base_binfo))
	{
	  if (TYPE_MARKED_P (BINFO_TYPE (base_binfo)))
	    {
	      CLASSTYPE_REPEATED_BASE_P (ref) = 1;
	      break;
	    }
	  TYPE_MARKED_P (BINFO_TYPE (base_binfo)) = 1;
	}
      for (base_binfo = binfo; base_binfo;
	   base_binfo = TREE_CHAIN (base_binfo))
	if (TYPE_MARKED_P (BINFO_TYPE (base_binfo)))
	  TYPE_MARKED_P (BINFO_TYPE (base_binfo)) = 0;
	else
	  break;
    }
}

/* Copies the enum-related properties from type SRC to type DST.
   Used with the underlying type of an enum and the enum itself.
*/

static void
copy_type_enum (tree dst, tree src)
{
  tree t;
  /* Propagate the numeric properties of SRC to DST and every variant
     of DST.  */
  for (t = dst; t; t = TYPE_NEXT_VARIANT (t))
    {
      TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (src);
      TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (src);
      TYPE_SIZE (t) = TYPE_SIZE (src);
      TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (src);
      /* NOTE(review): this sets the mode on DST each iteration rather
	 than on the variant T — confirm that is intentional.  */
      SET_TYPE_MODE (dst, TYPE_MODE (src));
      TYPE_PRECISION (t) = TYPE_PRECISION (src);
      unsigned valign = TYPE_ALIGN (src);
      if (TYPE_USER_ALIGN (t))
	valign = MAX (valign, TYPE_ALIGN (t));
      else
	TYPE_USER_ALIGN (t) = TYPE_USER_ALIGN (src);
      SET_TYPE_ALIGN (t, valign);
      TYPE_UNSIGNED (t) = TYPE_UNSIGNED (src);
    }
}

/* Begin compiling the definition of an enumeration type.
   NAME is its name, if ENUMTYPE is not NULL_TREE then the type has
   already been found.
   UNDERLYING_TYPE is the type that will be used as the storage for
   the enumeration type.  This should be NULL_TREE if no storage type
   was specified.
   ATTRIBUTES are any attributes specified after the enum-key.
   SCOPED_ENUM_P is true if this is a scoped enumeration type.
   if IS_NEW is not NULL, gets TRUE iff a new type is created.
   Returns the type object, as yet incomplete.
   Also records info about it so that build_enumerator
   may be used to declare the individual values as they are read.  */

tree
start_enum (tree name, tree enumtype, tree underlying_type,
	    tree attributes, bool scoped_enum_p, bool *is_new)
{
  tree prevtype = NULL_TREE;
  gcc_assert (identifier_p (name));

  if (is_new)
    *is_new = false;
  /* [C++0x dcl.enum]p5:

    If not explicitly specified, the underlying type of a scoped
    enumeration type is int.  */
  if (!underlying_type && scoped_enum_p)
    underlying_type = integer_type_node;

  if (underlying_type)
    underlying_type = cv_unqualified (underlying_type);

  /* If this is the real definition for a previous forward reference,
     fill in the contents in the same object that used to be the
     forward reference.  */
  if (!enumtype)
    enumtype = lookup_and_check_tag (enum_type, name,
				     /*tag_scope=*/ts_current,
				     /*template_header_p=*/false);

  /* In case of a template_decl, the only check that should be deferred
     to instantiation time is the comparison of underlying types.  */
  if (enumtype && TREE_CODE (enumtype) == ENUMERAL_TYPE)
    {
      if (scoped_enum_p != SCOPED_ENUM_P (enumtype))
	{
	  error_at (input_location, "scoped/unscoped mismatch "
		    "in enum %q#T", enumtype);
	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (enumtype)),
		  "previous definition here");
	  enumtype = error_mark_node;
	}
      else if (ENUM_FIXED_UNDERLYING_TYPE_P (enumtype) != !! underlying_type)
	{
	  error_at (input_location, "underlying type mismatch "
		    "in enum %q#T", enumtype);
	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (enumtype)),
		  "previous definition here");
	  enumtype = error_mark_node;
	}
      else if (underlying_type && ENUM_UNDERLYING_TYPE (enumtype)
	       && !same_type_p (underlying_type,
				ENUM_UNDERLYING_TYPE (enumtype)))
	{
	  error_at (input_location, "different underlying type "
		    "in enum %q#T", enumtype);
	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (enumtype)),
		  "previous definition here");
	  underlying_type = NULL_TREE;
	}
    }

  if (!enumtype || TREE_CODE (enumtype) != ENUMERAL_TYPE
      || processing_template_decl)
    {
      /* In case of error, make a dummy enum to allow parsing to
	 continue.  */
      if (enumtype == error_mark_node)
	{
	  name = make_anon_name ();
	  enumtype = NULL_TREE;
	}

      /* enumtype may be an ENUMERAL_TYPE if this is a redefinition
	 of an opaque enum, or an opaque enum of an already defined
	 enumeration (C++11).
	 In any other case, it'll be NULL_TREE.  */
      if (!enumtype)
	{
	  if (is_new)
	    *is_new = true;
	}
      prevtype = enumtype;

      /* Do not push the decl more than once.  */
      if (!enumtype
	  || TREE_CODE (enumtype) != ENUMERAL_TYPE)
	{
	  enumtype = cxx_make_type (ENUMERAL_TYPE);
	  enumtype = pushtag (name, enumtype, /*tag_scope=*/ts_current);

	  /* std::byte aliases anything.  */
	  if (enumtype != error_mark_node
	      && TYPE_CONTEXT (enumtype) == std_node
	      && !strcmp ("byte", TYPE_NAME_STRING (enumtype)))
	    TYPE_ALIAS_SET (enumtype) = 0;
	}
      else
	enumtype = xref_tag (enum_type, name, /*tag_scope=*/ts_current,
			     false);

      if (enumtype == error_mark_node)
	return error_mark_node;

      /* The enum is considered opaque until the opening '{' of the
	 enumerator list.  */
      SET_OPAQUE_ENUM_P (enumtype, true);
      ENUM_FIXED_UNDERLYING_TYPE_P (enumtype) = !! underlying_type;
    }

  SET_SCOPED_ENUM_P (enumtype, scoped_enum_p);

  cplus_decl_attributes (&enumtype, attributes, (int)ATTR_FLAG_TYPE_IN_PLACE);

  if (underlying_type)
    {
      if (ENUM_UNDERLYING_TYPE (enumtype))
	/* We already checked that it matches, don't change it to a different
	   typedef variant.  */;
      else if (CP_INTEGRAL_TYPE_P (underlying_type))
	{
	  copy_type_enum (enumtype, underlying_type);
	  ENUM_UNDERLYING_TYPE (enumtype) = underlying_type;
	}
      else if (dependent_type_p (underlying_type))
	ENUM_UNDERLYING_TYPE (enumtype) = underlying_type;
      else
	error ("underlying type %qT of %qT must be an integral type",
	       underlying_type, enumtype);
    }

  /* If into a template class, the returned enum is always the first
     declaration (opaque or not) seen.  This way all the references to
     this type will be to the same declaration.  The following ones are
     used only to check for definition errors.  */
  if (prevtype && processing_template_decl)
    return prevtype;
  else
    return enumtype;
}

/* After processing and defining all the values of an enumeration type,
   install their decls in the enumeration type.
   ENUMTYPE is the type object.  */

void
finish_enum_value_list (tree enumtype)
{
  tree values;
  tree underlying_type;
  tree decl;
  tree value;
  tree minnode, maxnode;
  tree t;

  bool fixed_underlying_type_p
    = ENUM_UNDERLYING_TYPE (enumtype) != NULL_TREE;

  /* We built up the VALUES in reverse order.
*/
  TYPE_VALUES (enumtype) = nreverse (TYPE_VALUES (enumtype));

  /* For an enum defined in a template, just set the type of the values;
     all further processing is postponed until the template is
     instantiated.  We need to set the type so that tsubst of a CONST_DECL
     works.  */
  if (processing_template_decl)
    {
      for (values = TYPE_VALUES (enumtype);
	   values;
	   values = TREE_CHAIN (values))
	TREE_TYPE (TREE_VALUE (values)) = enumtype;
      return;
    }

  /* Determine the minimum and maximum values of the enumerators.  */
  if (TYPE_VALUES (enumtype))
    {
      minnode = maxnode = NULL_TREE;

      for (values = TYPE_VALUES (enumtype);
	   values;
	   values = TREE_CHAIN (values))
	{
	  decl = TREE_VALUE (values);

	  /* [dcl.enum]: Following the closing brace of an enum-specifier,
	     each enumerator has the type of its enumeration.  Prior to the
	     closing brace, the type of each enumerator is the type of its
	     initializing value.  */
	  TREE_TYPE (decl) = enumtype;

	  /* Update the minimum and maximum values, if appropriate.  */
	  value = DECL_INITIAL (decl);
	  if (value == error_mark_node)
	    value = integer_zero_node;
	  /* Figure out what the minimum and maximum values of the
	     enumerators are.  */
	  if (!minnode)
	    minnode = maxnode = value;
	  else if (tree_int_cst_lt (maxnode, value))
	    maxnode = value;
	  else if (tree_int_cst_lt (value, minnode))
	    minnode = value;
	}
    }
  else
    /* [dcl.enum]

       If the enumerator-list is empty, the underlying type is as if
       the enumeration had a single enumerator with value 0.  */
    minnode = maxnode = integer_zero_node;

  if (!fixed_underlying_type_p)
    {
      /* Compute the number of bits required to represent all values of
	 the enumeration.  We must do this before the type of MINNODE
	 and MAXNODE are transformed, since tree_int_cst_min_precision
	 relies on the TREE_TYPE of the value it is passed.  */
      signop sgn = tree_int_cst_sgn (minnode) >= 0 ? UNSIGNED : SIGNED;
      int lowprec = tree_int_cst_min_precision (minnode, sgn);
      int highprec = tree_int_cst_min_precision (maxnode, sgn);
      int precision = MAX (lowprec, highprec);
      unsigned int itk;
      bool use_short_enum;

      /* Determine the underlying type of the enumeration.

	 [dcl.enum]

	 The underlying type of an enumeration is an integral type that
	 can represent all the enumerator values defined in the
	 enumeration.  It is implementation-defined which integral type is
	 used as the underlying type for an enumeration except that the
	 underlying type shall not be larger than int unless the value of
	 an enumerator cannot fit in an int or unsigned int.

	 We use "int" or an "unsigned int" as the underlying type, even if
	 a smaller integral type would work, unless the user has
	 explicitly requested that we use the smallest possible type.  The
	 user can request that for all enumerations with a command line
	 flag, or for just one enumeration with an attribute.  */
      use_short_enum = flag_short_enums
	|| lookup_attribute ("packed", TYPE_ATTRIBUTES (enumtype));

      /* If the precision of the type was specified with an attribute and it
	 was too small, give an error.  Otherwise, use it.  */
      if (TYPE_PRECISION (enumtype))
	{
	  if (precision > TYPE_PRECISION (enumtype))
	    error ("specified mode too small for enumerated values");
	  else
	    {
	      use_short_enum = true;
	      precision = TYPE_PRECISION (enumtype);
	    }
	}

      /* Walk the standard integer types from narrowest permitted kind
	 upward until one is wide enough with the right sign.  */
      for (itk = (use_short_enum ? itk_char : itk_int);
	   itk != itk_none;
	   itk++)
	{
	  underlying_type = integer_types[itk];
	  if (underlying_type != NULL_TREE
	      && TYPE_PRECISION (underlying_type) >= precision
	      && TYPE_SIGN (underlying_type) == sgn)
	    break;
	}
      if (itk == itk_none)
	{
	  /* DR 377

	     IF no integral type can represent all the enumerator values,
	     the enumeration is ill-formed.  */
	  error ("no integral type can represent all of the enumerator values "
		 "for %qT", enumtype);
	  precision = TYPE_PRECISION (long_long_integer_type_node);
	  underlying_type = integer_types[itk_unsigned_long_long];
	}

      /* [dcl.enum]

	 The value of sizeof() applied to an enumeration type, an object
	 of an enumeration type, or an enumerator, is the value of sizeof()
	 applied to the underlying type.  */
      copy_type_enum (enumtype, underlying_type);

      /* Compute the minimum and maximum values for the type.

	 [dcl.enum]

	 For an enumeration where emin is the smallest enumerator and emax
	 is the largest, the values of the enumeration are the values of the
	 underlying type in the range bmin to bmax, where bmin and bmax are,
	 respectively, the smallest and largest values of the smallest bit-
	 field that can store emin and emax.  */

      /* The middle-end currently assumes that types with TYPE_PRECISION
	 narrower than their underlying type are suitably zero or sign
	 extended to fill their mode.  Similarly, it assumes that the
	 front end assures that a value of a particular type must be within
	 TYPE_MIN_VALUE and TYPE_MAX_VALUE.

	 We used to set these fields based on bmin and bmax, but that led
	 to invalid assumptions like optimizing away bounds checking.  So
	 now we just set the TYPE_PRECISION, TYPE_MIN_VALUE, and
	 TYPE_MAX_VALUE to the values for the mode above and only restrict
	 the ENUM_UNDERLYING_TYPE for the benefit of diagnostics.  */
      ENUM_UNDERLYING_TYPE (enumtype)
	= build_distinct_type_copy (underlying_type);
      TYPE_PRECISION (ENUM_UNDERLYING_TYPE (enumtype)) = precision;
      set_min_and_max_values_for_integral_type
	(ENUM_UNDERLYING_TYPE (enumtype), precision, sgn);

      /* If -fstrict-enums, still constrain TYPE_MIN/MAX_VALUE.  */
      if (flag_strict_enums)
	set_min_and_max_values_for_integral_type (enumtype, precision, sgn);
    }
  else
    underlying_type = ENUM_UNDERLYING_TYPE (enumtype);

  /* Convert each of the enumerators to the type of the underlying
     type of the enumeration.  */
  for (values = TYPE_VALUES (enumtype); values; values = TREE_CHAIN (values))
    {
      decl = TREE_VALUE (values);
      iloc_sentinel ils (DECL_SOURCE_LOCATION (decl));
      if (fixed_underlying_type_p)
	/* If the enumeration type has a fixed underlying type, we
	   already checked all of the enumerator values.  */
	value = DECL_INITIAL (decl);
      else
	value = perform_implicit_conversion (underlying_type,
					     DECL_INITIAL (decl),
					     tf_warning_or_error);
      /* Do not clobber shared ints.  */
      if (value != error_mark_node)
	{
	  value = copy_node (value);

	  TREE_TYPE (value) = enumtype;
	}
      DECL_INITIAL (decl) = value;
    }

  /* Fix up all variant types of this enum type.  */
  for (t = TYPE_MAIN_VARIANT (enumtype); t; t = TYPE_NEXT_VARIANT (t))
    TYPE_VALUES (t) = TYPE_VALUES (enumtype);

  if (at_class_scope_p ()
      && COMPLETE_TYPE_P (current_class_type)
      && UNSCOPED_ENUM_P (enumtype))
    {
      insert_late_enum_def_bindings (current_class_type, enumtype);
      /* TYPE_FIELDS needs fixup.  */
      fixup_type_variants (current_class_type);
    }

  /* Finish debugging output for this type.  */
  rest_of_type_compilation (enumtype, namespace_bindings_p ());

  /* Each enumerator now has the type of its enumeration.  Clear the
     cache so that this change in types doesn't confuse us later on.  */
  clear_cv_and_fold_caches ();
}

/* Finishes the enum type.  This is called only the first time an
   enumeration is seen, be it opaque or ordinary.
   ENUMTYPE is the type object.  */

void
finish_enum (tree enumtype)
{
  if (processing_template_decl)
    {
      if (at_function_scope_p ())
	add_stmt (build_min (TAG_DEFN, enumtype));
      return;
    }

  /* If this is a forward declaration, there should not be any variants,
     though we can get a variant in the middle of an enum-specifier with
     wacky code like 'enum E { e = sizeof(const E*) };'  */
  gcc_assert (enumtype == TYPE_MAIN_VARIANT (enumtype)
	      && (TYPE_VALUES (enumtype)
		  || !TYPE_NEXT_VARIANT (enumtype)));
}

/* Build and install a CONST_DECL for an enumeration constant of the
   enumeration type ENUMTYPE whose NAME and VALUE (if any) are provided.
   Apply ATTRIBUTES if available.  LOC is the location of NAME.
   Assignment of sequential values by default is handled here.  */

void
build_enumerator (tree name, tree value, tree enumtype, tree attributes,
		  location_t loc)
{
  tree decl;
  tree context;
  tree type;

  /* scalar_constant_value will pull out this expression, so make sure
     it's folded as appropriate.  */
  if (processing_template_decl)
    value = fold_non_dependent_expr (value);

  /* If the VALUE was erroneous, pretend it wasn't there; that will
     result in the enum being assigned the next value in sequence.  */
  if (value == error_mark_node)
    value = NULL_TREE;

  /* Remove no-op casts from the value.  */
  if (value)
    STRIP_TYPE_NOPS (value);

  if (! processing_template_decl)
    {
      /* Validate and default VALUE.  */
      if (value != NULL_TREE)
	{
	  if (!ENUM_UNDERLYING_TYPE (enumtype))
	    {
	      /* No fixed underlying type: the initializer need only
		 convert to some integral or enumeration type.  */
	      tree tmp_value = build_expr_type_conversion (WANT_INT | WANT_ENUM,
							   value, true);
	      if (tmp_value)
		value = tmp_value;
	    }
	  else if (! INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P
		   (TREE_TYPE (value)))
	    /* Fixed underlying type: convert the initializer to it,
	       diagnosing narrowing per [dcl.enum].  */
	    value = perform_implicit_conversion_flags
	      (ENUM_UNDERLYING_TYPE (enumtype), value, tf_warning_or_error,
	       LOOKUP_IMPLICIT | LOOKUP_NO_NARROWING);

	  if (value == error_mark_node)
	    value = NULL_TREE;

	  if (value != NULL_TREE)
	    {
	      if (! INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P
		  (TREE_TYPE (value)))
		{
		  error_at (cp_expr_loc_or_input_loc (value),
			    "enumerator value for %qD must have integral or "
			    "unscoped enumeration type", name);
		  value = NULL_TREE;
		}
	      else
		{
		  value = cxx_constant_value (value);

		  if (TREE_CODE (value) != INTEGER_CST)
		    {
		      error ("enumerator value for %qD is not an integer "
			     "constant", name);
		      value = NULL_TREE;
		    }
		}
	    }
	}

      /* Default based on previous value.  */
      if (value == NULL_TREE)
	{
	  if (TYPE_VALUES (enumtype))
	    {
	      tree prev_value;

	      /* C++03 7.2/4: If no initializer is specified for the first
		 enumerator, the type is an unspecified integral
		 type.  Otherwise the type is the same as the type of the
		 initializing value of the preceding enumerator unless the
		 incremented value is not representable in that type, in
		 which case the type is an unspecified integral type
		 sufficient to contain the incremented value.  */
	      /* TYPE_VALUES is built in reverse, so its head is the most
		 recently declared enumerator.  */
	      prev_value = DECL_INITIAL (TREE_VALUE (TYPE_VALUES (enumtype)));
	      if (error_operand_p (prev_value))
		value = error_mark_node;
	      else
		{
		  /* Compute prev + 1 in infinite precision, then search
		     for a standard integer type wide enough to hold it.  */
		  wi::overflow_type overflowed;
		  tree type = TREE_TYPE (prev_value);
		  signop sgn = TYPE_SIGN (type);
		  widest_int wi = wi::add (wi::to_widest (prev_value), 1, sgn,
					   &overflowed);
		  if (!overflowed)
		    {
		      bool pos = !wi::neg_p (wi, sgn);
		      if (!wi::fits_to_tree_p (wi, type))
			{
			  /* Walk the standard integer types from int up,
			     skipping unsigned types for negative values.  */
			  unsigned int itk;
			  for (itk = itk_int; itk != itk_none; itk++)
			    {
			      type = integer_types[itk];
			      if (type != NULL_TREE
				  && (pos || !TYPE_UNSIGNED (type))
				  && wi::fits_to_tree_p (wi, type))
				break;
			    }
			  /* Pre-C++11, needing a type wider than long is
			     only a conditionally-supported extension.  */
			  if (type && cxx_dialect < cxx11
			      && itk > itk_unsigned_long)
			    pedwarn (input_location, OPT_Wlong_long,
				     pos ? G_("\
incremented enumerator value is too large for %<unsigned long%>")
				     : G_("\
incremented enumerator value is too large for %<long%>"));
			}
		      if (type == NULL_TREE)
			overflowed = wi::OVF_UNKNOWN;
		      else
			value = wide_int_to_tree (type, wi);
		    }

		  if (overflowed)
		    {
		      error ("overflow in enumeration values at %qD", name);
		      value = error_mark_node;
		    }
		}
	    }
	  else
	    /* First enumerator with no initializer: starts at zero.  */
	    value = integer_zero_node;
	}

      /* Remove no-op casts from the value.  */
      STRIP_TYPE_NOPS (value);

      /* If the underlying type of the enum is fixed, check whether
	 the enumerator values fits in the underlying type.  If it
	 does not fit, the program is ill-formed [C++0x dcl.enum].  */
      if (ENUM_UNDERLYING_TYPE (enumtype)
	  && value
	  && TREE_CODE (value) == INTEGER_CST)
	{
	  if (!int_fits_type_p (value, ENUM_UNDERLYING_TYPE (enumtype)))
	    error ("enumerator value %qE is outside the range of underlying "
		   "type %qT", value, ENUM_UNDERLYING_TYPE (enumtype));

	  /* Convert the value to the appropriate type.  */
	  value = fold_convert (ENUM_UNDERLYING_TYPE (enumtype), value);
	}
    }

  /* C++ associates enums with global, function, or class declarations.  */
  context = current_scope ();

  /* Build the actual enumeration constant.  Note that the enumeration
     constants have the underlying type of the enum (if it is fixed)
     or the type of their initializer (if the underlying type of the
     enum is not fixed):

      [ C++0x dcl.enum ]

        If the underlying type is fixed, the type of each enumerator
        prior to the closing brace is the underlying type; if the
        initializing value of an enumerator cannot be represented by
        the underlying type, the program is ill-formed.  If the
        underlying type is not fixed, the type of each enumerator is
        the type of its initializing value.

    If the underlying type is not fixed, it will be computed by
    finish_enum and we will reset the type of this enumerator.  Of
    course, if we're processing a template, there may be no value.  */
  type = value ? TREE_TYPE (value) : NULL_TREE;
  decl = build_decl (loc, CONST_DECL, name, type);

  DECL_CONTEXT (decl) = enumtype;
  TREE_CONSTANT (decl) = 1;
  TREE_READONLY (decl) = 1;
  DECL_INITIAL (decl) = value;

  if (attributes)
    cplus_decl_attributes (&decl, attributes, 0);

  if (context && context == current_class_type && !SCOPED_ENUM_P (enumtype))
    {
      /* In something like `struct S { enum E { i = 7 }; };' we put `i'
	 on the TYPE_FIELDS list for `S'.  (That's so that you can say
	 things like `S::i' later.)  */

      /* The enumerator may be getting declared outside of its enclosing
	 class, like so:

	   class S { public: enum E : int; }; enum S::E : int { i = 7; };

	 For which case we need to make sure that the access of `S::i'
	 matches the access of `S::E'.  */
      /* Temporarily impersonate the enum's own access level, then
	 restore the saved specifier after the member is declared.  */
      tree saved_cas = current_access_specifier;
      if (TREE_PRIVATE (TYPE_NAME (enumtype)))
	current_access_specifier = access_private_node;
      else if (TREE_PROTECTED (TYPE_NAME (enumtype)))
	current_access_specifier = access_protected_node;
      else
	current_access_specifier = access_public_node;

      finish_member_declaration (decl);

      current_access_specifier = saved_cas;
    }
  else
    pushdecl (decl);

  /* Add this enumeration constant to the list for this type.  */
  TYPE_VALUES (enumtype) = tree_cons (name, decl, TYPE_VALUES (enumtype));
}

/* Look for an enumerator with the given NAME within the enumeration
   type ENUMTYPE.  This routine is used primarily for qualified name
   lookup into an enumerator in C++0x, e.g.,

     enum class Color { Red, Green, Blue };

     Color color = Color::Red;

   Returns the value corresponding to the enumerator, or NULL_TREE if
   no such enumerator was found.  */
tree
lookup_enumerator (tree enumtype, tree name)
{
  tree e;
  gcc_assert (enumtype && TREE_CODE (enumtype) == ENUMERAL_TYPE);

  /* TYPE_VALUES is a TREE_LIST keyed by enumerator name.  */
  e = purpose_member (name, TYPE_VALUES (enumtype));
  return e? TREE_VALUE (e) : NULL_TREE;
}

/* Implement LANG_HOOKS_SIMULATE_ENUM_DECL.  Builds an unscoped enum
   named NAME at LOC with the given NAME/value pairs, as if it had been
   written in source; used by non-C++ clients of the front end.  */

tree
cxx_simulate_enum_decl (location_t loc, const char *name,
			vec<string_int_pair> values)
{
  location_t saved_loc = input_location;
  input_location = loc;

  tree enumtype = start_enum (get_identifier (name), NULL_TREE, NULL_TREE,
			      NULL_TREE, false, NULL);
  if (!OPAQUE_ENUM_P (enumtype))
    {
      /* start_enum found an existing (non-opaque) definition.  */
      error_at (loc, "multiple definition of %q#T", enumtype);
      inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (enumtype)),
	      "previous definition here");
      return enumtype;
    }
  SET_OPAQUE_ENUM_P (enumtype, false);
  DECL_SOURCE_LOCATION (TYPE_NAME (enumtype)) = loc;

  string_int_pair *value;
  unsigned int i;
  FOR_EACH_VEC_ELT (values, i, value)
    build_enumerator (get_identifier (value->first),
		      build_int_cst (integer_type_node, value->second),
		      enumtype, NULL_TREE, loc);

  finish_enum_value_list (enumtype);
  finish_enum (enumtype);

  input_location = saved_loc;
  return enumtype;
}

/* We're defining DECL.
   Make sure that its type is OK.  */

static void
check_function_type (tree decl, tree current_function_parms)
{
  tree fntype = TREE_TYPE (decl);
  tree return_type = complete_type (TREE_TYPE (fntype));

  /* In a function definition, arg types must be complete.  */
  require_complete_types_for_parms (current_function_parms);

  /* A dependent or deduced return type can't be checked yet.  */
  if (dependent_type_p (return_type)
      || type_uses_auto (return_type))
    return;
  if (!COMPLETE_OR_VOID_TYPE_P (return_type))
    {
      tree args = TYPE_ARG_TYPES (fntype);

      error ("return type %q#T is incomplete", return_type);

      /* Make it return void instead.  */
      if (TREE_CODE (fntype) == METHOD_TYPE)
	fntype = build_method_type_directly (TREE_TYPE (TREE_VALUE (args)),
					     void_type_node,
					     TREE_CHAIN (args));
      else
	fntype = build_function_type (void_type_node, args);
      /* Carry over attributes and language-specific qualifiers
	 (e.g. ref-qualifiers / exception spec) to the repaired type.  */
      fntype = (cp_build_type_attribute_variant
		(fntype, TYPE_ATTRIBUTES (TREE_TYPE (decl))));
      fntype = cxx_copy_lang_qualifiers (fntype, TREE_TYPE (decl));
      TREE_TYPE (decl) = fntype;
    }
  else
    {
      /* Complete (or void) return type: diagnose returning an abstract
	 class and warn about ABI-sensitive parameter passing.  */
      abstract_virtuals_error (decl, TREE_TYPE (fntype));
      maybe_warn_parm_abi (TREE_TYPE (fntype),
			   DECL_SOURCE_LOCATION (decl));
    }
}

/* True iff FN is an implicitly-defined default constructor.  */

static bool
implicit_default_ctor_p (tree fn)
{
  return (DECL_CONSTRUCTOR_P (fn)
	  && !user_provided_p (fn)
	  && sufficient_parms_p (FUNCTION_FIRST_USER_PARMTYPE (fn)));
}

/* Clobber the contents of *this to let the back end know that the object
   storage is dead when we enter the constructor or leave the destructor.  */

static tree
build_clobber_this ()
{
  /* Clobbering an empty base is pointless, and harmful if its one byte
     TYPE_SIZE overlays real data.  */
  if (is_empty_class (current_class_type))
    return void_node;

  /* If we have virtual bases, clobber the whole object, but only if we're in
     charge.  If we don't have virtual bases, clobber the as-base type so we
     don't mess with tail padding.  */

  bool vbases = CLASSTYPE_VBASECLASSES (current_class_type);

  tree ctype = current_class_type;
  if (!vbases)
    ctype = CLASSTYPE_AS_BASE (ctype);

  tree clobber = build_clobber (ctype);

  tree thisref = current_class_ref;
  if (ctype != current_class_type)
    {
      /* Re-view *this as the as-base type for the clobber store.  */
      thisref = build_nop (build_reference_type (ctype), current_class_ptr);
      thisref = convert_from_reference (thisref);
    }

  tree exprstmt = build2 (MODIFY_EXPR, void_type_node, thisref, clobber);
  if (vbases)
    /* Only the in-charge (complete-object) cdtor may clobber the
       whole object including virtual bases.  */
    exprstmt = build_if_in_charge (exprstmt);

  return exprstmt;
}

/* Create the FUNCTION_DECL for a function definition.
   DECLSPECS and DECLARATOR are the parts of the declaration;
   they describe the function's name and the type it returns,
   but twisted together in a fashion that parallels the syntax of C.

   FLAGS is a bitwise or of SF_PRE_PARSED (indicating that the
   DECLARATOR is really the DECL for the function we are about to
   process and that DECLSPECS should be ignored), SF_INCLASS_INLINE
   indicating that the function is an inline defined in-class.

   This function creates a binding context for the function body
   as well as setting up the FUNCTION_DECL in current_function_decl.

   For C++, we must first check whether that datum makes any sense.
   For example, "class A local_a(1,2);" means that variable local_a
   is an aggregate of type A, which should have a constructor
   applied to it with the argument list [1, 2].

   On entry, DECL_INITIAL (decl1) should be NULL_TREE or error_mark_node,
   or may be a BLOCK if the function has been defined previously
   in this translation unit.  On exit, DECL_INITIAL (decl1) will be
   error_mark_node if the function has never been defined, or
   a BLOCK if the function has been defined somewhere.  */

bool
start_preparsed_function (tree decl1, tree attrs, int flags)
{
  tree ctype = NULL_TREE;
  tree fntype;
  tree restype;
  int doing_friend = 0;
  cp_binding_level *bl;
  tree current_function_parms;
  struct c_fileinfo *finfo
    = get_fileinfo (LOCATION_FILE (DECL_SOURCE_LOCATION (decl1)));
  bool honor_interface;

  /* Sanity check.  */
  gcc_assert (VOID_TYPE_P (TREE_VALUE (void_list_node)));
  gcc_assert (TREE_CHAIN (void_list_node) == NULL_TREE);

  fntype = TREE_TYPE (decl1);
  if (TREE_CODE (fntype) == METHOD_TYPE)
    ctype = TYPE_METHOD_BASETYPE (fntype);

  /* ISO C++ 11.4/5.  A friend function defined in a class is in
     the (lexical) scope of the class in which it is defined.  */
  if (!ctype && DECL_FRIEND_P (decl1))
    {
      ctype = DECL_FRIEND_CONTEXT (decl1);

      /* CTYPE could be null here if we're dealing with a template;
	 for example, `inline friend float foo()' inside a template
	 will have no CTYPE set.  */
      if (ctype && TREE_CODE (ctype) != RECORD_TYPE)
	ctype = NULL_TREE;
      else
	doing_friend = 1;
    }

  if (DECL_DECLARED_INLINE_P (decl1)
      && lookup_attribute ("noinline", attrs))
    warning_at (DECL_SOURCE_LOCATION (decl1), 0,
		"inline function %qD given attribute %qs", decl1, "noinline");

  /* Handle gnu_inline attribute.  */
  if (GNU_INLINE_P (decl1))
    {
      DECL_EXTERNAL (decl1) = 1;
      DECL_NOT_REALLY_EXTERN (decl1) = 0;
      DECL_INTERFACE_KNOWN (decl1) = 1;
      DECL_DISREGARD_INLINE_LIMITS (decl1) = 1;
    }

  if (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (decl1))
    /* This is a constructor, we must ensure that any default args
       introduced by this definition are propagated to the clones
       now.  The clones are used directly in overload resolution.  */
    adjust_clone_args (decl1);

  /* Sometimes we don't notice that a function is a static member, and
     build a METHOD_TYPE for it.  Fix that up now.  */
  gcc_assert (!(ctype != NULL_TREE
		&& DECL_STATIC_FUNCTION_P (decl1)
		&& TREE_CODE (TREE_TYPE (decl1)) == METHOD_TYPE));

  /* Set up current_class_type, and enter the scope of the class, if
     appropriate.  */
  if (ctype)
    push_nested_class (ctype);
  else if (DECL_STATIC_FUNCTION_P (decl1))
    push_nested_class (DECL_CONTEXT (decl1));

  /* Now that we have entered the scope of the class, we must restore
     the bindings for any template parameters surrounding DECL1, if it
     is an inline member template.  (Order is important; consider the
     case where a template parameter has the same name as a field of
     the class.)  It is not until after this point that
     PROCESSING_TEMPLATE_DECL is guaranteed to be set up correctly.  */
  if (flags & SF_INCLASS_INLINE)
    maybe_begin_member_template_processing (decl1);

  /* Effective C++ rule 15.  */
  if (warn_ecpp
      && DECL_ASSIGNMENT_OPERATOR_P (decl1)
      && DECL_OVERLOADED_OPERATOR_IS (decl1, NOP_EXPR)
      && VOID_TYPE_P (TREE_TYPE (fntype)))
    warning (OPT_Weffc__,
	     "%<operator=%> should return a reference to %<*this%>");

  /* Make the init_value nonzero so pushdecl knows this is not tentative.
     error_mark_node is replaced below (in poplevel) with the BLOCK.  */
  if (!DECL_INITIAL (decl1))
    DECL_INITIAL (decl1) = error_mark_node;

  /* This function exists in static storage.
     (This does not mean `static' in the C sense!)  */
  TREE_STATIC (decl1) = 1;

  /* We must call push_template_decl after current_class_type is set
     up.  (If we are processing inline definitions after exiting a
     class scope, current_class_type will be NULL_TREE until set above
     by push_nested_class.)  */
  if (processing_template_decl)
    {
      tree newdecl1 = push_template_decl (decl1);
      if (newdecl1 == error_mark_node)
	{
	  /* Undo the class entry done above before bailing out.  */
	  if (ctype || DECL_STATIC_FUNCTION_P (decl1))
	    pop_nested_class ();
	  return false;
	}
      decl1 = newdecl1;
    }

  /* Make sure the parameter and return types are reasonable.  When
     you declare a function, these types can be incomplete, but they
     must be complete when you define the function.  */
  check_function_type (decl1, DECL_ARGUMENTS (decl1));

  /* Build the return declaration for the function.  */
  restype = TREE_TYPE (fntype);

  if (DECL_RESULT (decl1) == NULL_TREE)
    {
      tree resdecl;

      resdecl = build_decl (input_location, RESULT_DECL, 0, restype);
      DECL_ARTIFICIAL (resdecl) = 1;
      DECL_IGNORED_P (resdecl) = 1;
      DECL_RESULT (decl1) = resdecl;

      cp_apply_type_quals_to_decl (cp_type_quals (restype), resdecl);
    }

  /* Record the decl so that the function name is defined.
     If we already have a decl for this name, and it is a
     FUNCTION_DECL, use the old decl.  */
  if (!processing_template_decl && !(flags & SF_PRE_PARSED))
    {
      /* A specialization is not used to guide overload resolution.  */
      if (!DECL_FUNCTION_MEMBER_P (decl1)
	  && !(DECL_USE_TEMPLATE (decl1)
	       && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl1))))
	{
	  tree olddecl = pushdecl (decl1);

	  if (olddecl == error_mark_node)
	    /* If something went wrong when registering the declaration,
	       use DECL1; we have to have a FUNCTION_DECL to use when
	       parsing the body of the function.  */
	    ;
	  else
	    {
	      /* Otherwise, OLDDECL is either a previous declaration of
		 the same function or DECL1 itself.  */

	      if (warn_missing_declarations
		  && olddecl == decl1
		  && !DECL_MAIN_P (decl1)
		  && TREE_PUBLIC (decl1)
		  && !DECL_DECLARED_INLINE_P (decl1))
		{
		  tree context;

		  /* Check whether DECL1 is in an anonymous namespace.  */
		  for (context = DECL_CONTEXT (decl1);
		       context;
		       context = DECL_CONTEXT (context))
		    {
		      if (TREE_CODE (context) == NAMESPACE_DECL
			  && DECL_NAME (context) == NULL_TREE)
			break;
		    }

		  if (context == NULL)
		    warning_at (DECL_SOURCE_LOCATION (decl1),
				OPT_Wmissing_declarations,
				"no previous declaration for %qD", decl1);
		}

	      decl1 = olddecl;
	    }
	}
      else
	{
	  /* We need to set the DECL_CONTEXT.  */
	  if (!DECL_CONTEXT (decl1) && DECL_TEMPLATE_INFO (decl1))
	    DECL_CONTEXT (decl1) = DECL_CONTEXT (DECL_TI_TEMPLATE (decl1));
	}
      /* pushdecl may have merged with a prior declaration; refresh the
	 cached type and return type.  */
      fntype = TREE_TYPE (decl1);
      restype = TREE_TYPE (fntype);

      /* If #pragma weak applies, mark the decl appropriately now.
	 The pragma only applies to global functions.  Because
	 determining whether or not the #pragma applies involves
	 computing the mangled name for the declaration, we cannot
	 apply the pragma until after we have merged this declaration
	 with any previous declarations; if the original declaration
	 has a linkage specification, that specification applies to
	 the definition as well, and may affect the mangled name.  */
      if (DECL_FILE_SCOPE_P (decl1))
	maybe_apply_pragma_weak (decl1);
    }

  /* We are now in the scope of the function being defined.  */
  current_function_decl = decl1;

  /* Save the parm names or decls from this function's declarator
     where store_parm_decls will find them.  */
  current_function_parms = DECL_ARGUMENTS (decl1);

  /* Let the user know we're compiling this function.  */
  announce_function (decl1);

  gcc_assert (DECL_INITIAL (decl1));

  /* This function may already have been parsed, in which case just
     return; our caller will skip over the body without parsing.  */
  if (DECL_INITIAL (decl1) != error_mark_node)
    return true;

  /* Initialize RTL machinery.  We cannot do this until
     CURRENT_FUNCTION_DECL and DECL_RESULT are set up.  We do this
     even when processing a template; this is how we get
     CFUN set up, and our per-function variables initialized.
     FIXME factor out the non-RTL stuff.  */
  bl = current_binding_level;
  allocate_struct_function (decl1, processing_template_decl);

  /* Initialize the language data structures.  Whenever we start a new
     function, we destroy temporaries in the usual way.  */
  cfun->language = ggc_cleared_alloc<language_function> ();
  current_stmt_tree ()->stmts_are_full_exprs_p = 1;
  /* allocate_struct_function may have reset the binding level;
     restore the one saved above.  */
  current_binding_level = bl;

  /* If we are (erroneously) defining a function that we have already
     defined before, wipe out what we knew before.  */
  gcc_checking_assert (!DECL_PENDING_INLINE_P (decl1));

  FNDECL_USED_AUTO (decl1) = false;
  DECL_SAVED_AUTO_RETURN_TYPE (decl1) = NULL;
  if (!processing_template_decl && type_uses_auto (restype))
    {
      /* Remember the placeholder return type so `return' statements
	 can deduce it.  */
      FNDECL_USED_AUTO (decl1) = true;
      DECL_SAVED_AUTO_RETURN_TYPE (decl1) = restype;
    }

  /* Start the statement-tree, start the tree now.  */
  DECL_SAVED_TREE (decl1) = push_stmt_list ();

  if (ctype && !doing_friend && !DECL_STATIC_FUNCTION_P (decl1))
    {
      /* We know that this was set up by `grokclassfn'.  We do not
	 wait until `store_parm_decls', since evil parse errors may
	 never get us to that point.  Here we keep the consistency
	 between `current_class_type' and `current_class_ptr'.  */
      tree t = DECL_ARGUMENTS (decl1);

      gcc_assert (t != NULL_TREE && TREE_CODE (t) == PARM_DECL);
      gcc_assert (TYPE_PTR_P (TREE_TYPE (t)));

      cp_function_chain->x_current_class_ref
	= cp_build_fold_indirect_ref (t);
      /* Set this second to avoid shortcut in cp_build_indirect_ref.  */
      cp_function_chain->x_current_class_ptr = t;

      /* Constructors and destructors need to know whether they're "in
	 charge" of initializing virtual base classes.  */
      t = DECL_CHAIN (t);
      if (DECL_HAS_IN_CHARGE_PARM_P (decl1))
	{
	  current_in_charge_parm = t;
	  t = DECL_CHAIN (t);
	}
      if (DECL_HAS_VTT_PARM_P (decl1))
	{
	  gcc_assert (DECL_NAME (t) == vtt_parm_identifier);
	  current_vtt_parm = t;
	}
    }

  honor_interface = (!DECL_TEMPLATE_INSTANTIATION (decl1)
		     /* Implicitly-defined methods (like the
			destructor for a class in which no destructor
			is explicitly declared) must not be defined
			until their definition is needed.  So, we
			ignore interface specifications for
			compiler-generated functions.  */
		     && !DECL_ARTIFICIAL (decl1));

  if (processing_template_decl)
    /* Don't mess with interface flags.  */;
  else if (DECL_INTERFACE_KNOWN (decl1))
    {
      tree ctx = decl_function_context (decl1);

      if (DECL_NOT_REALLY_EXTERN (decl1))
	DECL_EXTERNAL (decl1) = 0;

      if (ctx != NULL_TREE && vague_linkage_p (ctx))
	/* This is a function in a local class in an extern inline
	   or template function.  */
	comdat_linkage (decl1);
    }
  /* If this function belongs to an interface, it is public.
     If it belongs to someone else's interface, it is also external.
     This only affects inlines and template instantiations.  */
  else if (!finfo->interface_unknown && honor_interface)
    {
      if (DECL_DECLARED_INLINE_P (decl1)
	  || DECL_TEMPLATE_INSTANTIATION (decl1))
	{
	  DECL_EXTERNAL (decl1)
	    = (finfo->interface_only
	       || (DECL_DECLARED_INLINE_P (decl1)
		   && ! flag_implement_inlines
		   && !DECL_VINDEX (decl1)));

	  /* For WIN32 we also want to put these in linkonce sections.  */
	  maybe_make_one_only (decl1);
	}
      else
	DECL_EXTERNAL (decl1) = 0;

      DECL_INTERFACE_KNOWN (decl1) = 1;

      /* If this function is in an interface implemented in this file,
	 make sure that the back end knows to emit this function
	 here.  */
      if (!DECL_EXTERNAL (decl1))
	mark_needed (decl1);
    }
  else if (finfo->interface_unknown && finfo->interface_only
	   && honor_interface)
    {
      /* If MULTIPLE_SYMBOL_SPACES is defined and we saw a #pragma
	 interface, we will have both finfo->interface_unknown and
	 finfo->interface_only set.  In that case, we don't want to
	 use the normal heuristics because someone will supply a
	 #pragma implementation elsewhere, and deducing it here would
	 produce a conflict.  */
      comdat_linkage (decl1);
      DECL_EXTERNAL (decl1) = 0;
      DECL_INTERFACE_KNOWN (decl1) = 1;
      DECL_DEFER_OUTPUT (decl1) = 1;
    }
  else
    {
      /* This is a definition, not a reference.  So clear
	 DECL_EXTERNAL, unless this is a GNU extern inline.  */
      if (!GNU_INLINE_P (decl1))
	DECL_EXTERNAL (decl1) = 0;

      if ((DECL_DECLARED_INLINE_P (decl1)
	   || DECL_TEMPLATE_INSTANTIATION (decl1))
	  && ! DECL_INTERFACE_KNOWN (decl1))
	DECL_DEFER_OUTPUT (decl1) = 1;
      else
	DECL_INTERFACE_KNOWN (decl1) = 1;
    }

  /* Determine the ELF visibility attribute for the function.  We must
     not do this before calling "pushdecl", as we must allow
     "duplicate_decls" to merge any attributes appropriately.  We also
     need to wait until linkage is set.  */
  if (!DECL_CLONED_FUNCTION_P (decl1))
    determine_visibility (decl1);

  if (!processing_template_decl)
    maybe_instantiate_noexcept (decl1);

  begin_scope (sk_function_parms, decl1);

  ++function_depth;

  if (DECL_DESTRUCTOR_P (decl1)
      || (DECL_CONSTRUCTOR_P (decl1)
	  && targetm.cxx.cdtor_returns_this ()))
    {
      /* All returns in a cdtor funnel through this label so cleanups
	 (and the return of `this' on some targets) run.  */
      cdtor_label = create_artificial_label (input_location);
      LABEL_DECL_CDTOR (cdtor_label) = true;
    }

  start_fname_decls ();

  store_parm_decls (current_function_parms);

  push_operator_bindings ();

  if (!processing_template_decl
      && (flag_lifetime_dse > 1)
      && DECL_CONSTRUCTOR_P (decl1)
      && !DECL_CLONED_FUNCTION_P (decl1)
      /* Clobbering an empty base is harmful if it overlays real data.  */
      && !is_empty_class (current_class_type)
      /* We can't clobber safely for an implicitly-defined default
	 constructor because part of the initialization might happen
	 before we enter the constructor, via AGGR_INIT_ZERO_FIRST
	 (c++/68006).  */
      && !implicit_default_ctor_p (decl1))
    finish_expr_stmt (build_clobber_this ());

  if (!processing_template_decl
      && DECL_CONSTRUCTOR_P (decl1)
      && sanitize_flags_p (SANITIZE_VPTR)
      && !DECL_CLONED_FUNCTION_P (decl1)
      && !implicit_default_ctor_p (decl1))
    cp_ubsan_maybe_initialize_vtbl_ptrs (current_class_ptr);

  if (!DECL_OMP_DECLARE_REDUCTION_P (decl1))
    start_lambda_scope (decl1);

  return true;
}

/* Like start_preparsed_function, except that instead of a
   FUNCTION_DECL, this function takes DECLSPECS and DECLARATOR.

   Returns true on success.  If the DECLARATOR is not suitable
   for a function, we return false, which tells the parser to
   skip the entire function.  */

bool
start_function (cp_decl_specifier_seq *declspecs,
		const cp_declarator *declarator,
		tree attrs)
{
  tree decl1;

  decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, 1, &attrs);
  invoke_plugin_callbacks (PLUGIN_START_PARSE_FUNCTION, decl1);
  if (decl1 == error_mark_node)
    return false;

  if (DECL_MAIN_P (decl1))
    /* main must return int.  grokfndecl should have corrected it
       (and issued a diagnostic) if the user got it wrong.  */
    gcc_assert (same_type_p (TREE_TYPE (TREE_TYPE (decl1)),
			     integer_type_node));

  return start_preparsed_function (decl1, attrs, /*flags=*/SF_DEFAULT);
}

/* Returns true iff an EH_SPEC_BLOCK should be created in the body of
   FN.  */

static bool
use_eh_spec_block (tree fn)
{
  return (flag_exceptions && flag_enforce_eh_specs
	  && !processing_template_decl
	  /* We insert the EH_SPEC_BLOCK only in the original
	     function; then, it is copied automatically to the
	     clones.  */
	  && !DECL_CLONED_FUNCTION_P (fn)
	  /* Implicitly-generated constructors and destructors have
	     exception specifications.  However, those specifications
	     are the union of the possible exceptions specified by the
	     constructors/destructors for bases and members, so no
	     unallowed exception will ever reach this function.  By
	     not creating the EH_SPEC_BLOCK we save a little memory,
	     and we avoid spurious warnings about unreachable
	     code.  */
	  && !DECL_DEFAULTED_FN (fn)
	  && !type_throw_all_p (TREE_TYPE (fn)));
}

/* Helper function to push ARGS into the current lexical scope.  DECL
   is the function declaration.  NONPARMS is used to handle enum
   constants.  */

void
do_push_parm_decls (tree decl, tree args, tree *nonparms)
{
  /* If we're doing semantic analysis, then we'll call pushdecl
     for each of these.  We must do them in reverse order so that
     they end in the correct forward order.  */
  args = nreverse (args);

  tree next;
  for (tree parm = args; parm; parm = next)
    {
      next = DECL_CHAIN (parm);
      if (TREE_CODE (parm) == PARM_DECL)
	pushdecl (parm);
      else if (nonparms)
	{
	  /* If we find an enum constant or a type tag, put it aside for
	     the moment.  */
	  TREE_CHAIN (parm) = NULL_TREE;
	  *nonparms = chainon (*nonparms, parm);
	}
    }

  /* Get the decls in their original chain order and record in the
     function.  This is all and only the PARM_DECLs that were
     pushed into scope by the loop above.  */
  DECL_ARGUMENTS (decl) = get_local_decls ();
}

/* Store the parameter declarations into the current function declaration.
   This is called after parsing the parameter declarations, before
   digesting the body of the function.

   Also install to binding contour return value identifier, if any.  */

static void
store_parm_decls (tree current_function_parms)
{
  tree fndecl = current_function_decl;

  /* This is a chain of any other decls that came in among the parm
     declarations.  If a parm is declared with  enum {foo, bar} x;
     then CONST_DECLs for foo and bar are put here.  */
  tree nonparms = NULL_TREE;

  if (current_function_parms)
    {
      /* This case is when the function was defined with an ANSI prototype.
	 The parms already have decls, so we need not do anything here
	 except record them as in effect
	 and complain if any redundant old-style parm decls were written.  */

      tree specparms = current_function_parms;

      /* Must clear this because it might contain TYPE_DECLs declared
	     at class level.  */
      current_binding_level->names = NULL;

      do_push_parm_decls (fndecl, specparms, &nonparms);
    }
  else
    DECL_ARGUMENTS (fndecl) = NULL_TREE;

  /* Now store the final chain of decls for the arguments
     as the decl-chain of the current lexical scope.
     Put the enumerators in as well, at the front so that
     DECL_ARGUMENTS is not modified.  */
  current_binding_level->names = chainon (nonparms, DECL_ARGUMENTS (fndecl));

  if (use_eh_spec_block (current_function_decl))
    current_eh_spec_block = begin_eh_spec_block ();
}


/* Set the return value of the constructor (if present).  */

static void
finish_constructor_body (void)
{
  tree val;
  tree exprstmt;

  if (targetm.cxx.cdtor_returns_this ())
    {
      /* Any return from a constructor will end up here.  */
      add_stmt (build_stmt (input_location, LABEL_EXPR, cdtor_label));

      val = DECL_ARGUMENTS (current_function_decl);
      /* Copy `this' (the first argument) into DECL_RESULT.  */
      val = build2 (MODIFY_EXPR, TREE_TYPE (val),
		    DECL_RESULT (current_function_decl), val);
      /* Return the address of the object.  */
      exprstmt = build_stmt (input_location, RETURN_EXPR, val);
      add_stmt (exprstmt);
    }
}

/* Do all the processing for the beginning of a destructor; set up the
   vtable pointers and cleanups for bases and members.  */

static void
begin_destructor_body (void)
{
  tree compound_stmt;

  /* If the CURRENT_CLASS_TYPE is incomplete, we will have already
     issued an error message.  We still want to try to process the
     body of the function, but initialize_vtbl_ptrs will crash if
     TYPE_BINFO is NULL.  */
  if (COMPLETE_TYPE_P (current_class_type))
    {
      compound_stmt = begin_compound_stmt (0);
      /* Make all virtual function table pointers in non-virtual base
	 classes point to CURRENT_CLASS_TYPE's virtual function
	 tables.  */
      initialize_vtbl_ptrs (current_class_ptr);
      finish_compound_stmt (compound_stmt);

      if (flag_lifetime_dse
	  /* Clobbering an empty base is harmful if it overlays real data.  */
	  && !is_empty_class (current_class_type))
	{
	  if (sanitize_flags_p (SANITIZE_VPTR)
	      && (flag_sanitize_recover & SANITIZE_VPTR) == 0
	      && TYPE_CONTAINS_VPTR_P (current_class_type))
	    {
	      /* Under non-recoverable vptr sanitization, null out the
		 vptr instead of a full clobber so later virtual calls
		 on the dead object are caught.  */
	      tree binfo = TYPE_BINFO (current_class_type);
	      tree ref = cp_build_fold_indirect_ref (current_class_ptr);

	      tree vtbl_ptr = build_vfield_ref (ref, TREE_TYPE (binfo));
	      tree vtbl = build_zero_cst (TREE_TYPE (vtbl_ptr));
	      tree stmt = cp_build_modify_expr (input_location, vtbl_ptr,
						NOP_EXPR, vtbl,
						tf_warning_or_error);
	      /* If the vptr is shared with some virtual nearly empty base,
		 don't clear it if not in charge, the dtor of the virtual
		 nearly empty base will do that later.  */
	      if (CLASSTYPE_VBASECLASSES (current_class_type))
		{
		  /* Walk the primary-base chain looking for a virtual
		     primary base whose dtor owns the shared vptr.  */
		  tree c = current_class_type;
		  while (CLASSTYPE_PRIMARY_BINFO (c))
		    {
		      if (BINFO_VIRTUAL_P (CLASSTYPE_PRIMARY_BINFO (c)))
			{
			  stmt = convert_to_void (stmt, ICV_STATEMENT,
						  tf_warning_or_error);
			  stmt = build_if_in_charge (stmt);
			  break;
			}
		      c = BINFO_TYPE (CLASSTYPE_PRIMARY_BINFO (c));
		    }
		}
	      finish_decl_cleanup (NULL_TREE, stmt);
	    }
	  else
	    finish_decl_cleanup (NULL_TREE, build_clobber_this ());
	}

      /* And insert cleanups for our bases and members so that they
	 will be properly destroyed if we throw.  */
      push_base_cleanups ();
    }
}

/* At the end of every destructor we generate code to delete the object if
   necessary.  Do that now.  */

static void
finish_destructor_body (void)
{
  tree exprstmt;

  /* Any return from a destructor will end up here; that way all base
     and member cleanups will be run when the function returns.  */
  add_stmt (build_stmt (input_location, LABEL_EXPR, cdtor_label));

  if (targetm.cxx.cdtor_returns_this ())
    {
      tree val;

      val = DECL_ARGUMENTS (current_function_decl);
      val = build2 (MODIFY_EXPR, TREE_TYPE (val),
		    DECL_RESULT (current_function_decl), val);
      /* Return the address of the object.  */
      exprstmt = build_stmt (input_location, RETURN_EXPR, val);
      add_stmt (exprstmt);
    }
}

/* Do the necessary processing for the beginning of a function body, which
   in this case includes member-initializers, but not the catch clauses of
   a function-try-block.  Currently, this means opening a binding level
   for the member-initializers (in a ctor), member cleanups (in a dtor),
   and capture proxies (in a lambda operator()).  */

tree
begin_function_body (void)
{
  tree stmt;

  if (! FUNCTION_NEEDS_BODY_BLOCK (current_function_decl))
    return NULL_TREE;

  if (processing_template_decl)
    /* Do nothing now.  */;
  else
    /* Always keep the BLOCK node associated with the outermost pair of
       curly braces of a function.  These are needed for correct
       operation of dwarfout.c.  */
    keep_next_level (true);

  stmt = begin_compound_stmt (BCS_FN_BODY);

  if (processing_template_decl)
    /* Do nothing now.  */;
  else if (DECL_DESTRUCTOR_P (current_function_decl))
    begin_destructor_body ();

  return stmt;
}

/* Do the processing for the end of a function body.  Currently, this means
   closing out the cleanups for fully-constructed bases and members, and in
   the case of the destructor, deleting the object if desired.  Again, this
   is only meaningful for [cd]tors, since they are the only functions where
   there is a significant distinction between the main body and any
   function catch clauses.  Handling, say, main() return semantics here
   would be wrong, as flowing off the end of a function catch clause for
   main() would also need to return 0.  */

void
finish_function_body (tree compstmt)
{
  if (compstmt == NULL_TREE)
    return;

  /* Close the block.  */
  finish_compound_stmt (compstmt);

  if (processing_template_decl)
    /* Do nothing now.  */;
  else if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_constructor_body ();
  else if (DECL_DESTRUCTOR_P (current_function_decl))
    finish_destructor_body ();
}

/* Given a function, returns the BLOCK corresponding to the outermost level
   of curly braces, skipping the artificial block created for constructor
   initializers.  */

tree
outer_curly_brace_block (tree fndecl)
{
  tree block = DECL_INITIAL (fndecl);
  if (BLOCK_OUTER_CURLY_BRACE_P (block))
    return block;
  /* Descend at most two artificial levels to find the braces block.  */
  block = BLOCK_SUBBLOCKS (block);
  if (BLOCK_OUTER_CURLY_BRACE_P (block))
    return block;
  block = BLOCK_SUBBLOCKS (block);
  gcc_assert (BLOCK_OUTER_CURLY_BRACE_P (block));
  return block;
}

/* If FNDECL is a class's key method, add the class to the list of
   keyed classes that should be emitted.
*/ static void record_key_method_defined (tree fndecl) { if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fndecl) && DECL_VIRTUAL_P (fndecl) && !processing_template_decl) { tree fnclass = DECL_CONTEXT (fndecl); if (fndecl == CLASSTYPE_KEY_METHOD (fnclass)) vec_safe_push (keyed_classes, fnclass); } } /* Subroutine of finish_function. Save the body of constexpr functions for possible future compile time evaluation. */ static void maybe_save_function_definition (tree fun) { if (!processing_template_decl && DECL_DECLARED_CONSTEXPR_P (fun) && !cp_function_chain->invalid_constexpr && !DECL_CLONED_FUNCTION_P (fun)) register_constexpr_fundef (fun, DECL_SAVED_TREE (fun)); } /* Attempt to add a fix-it hint to RICHLOC suggesting the insertion of "return *this;" immediately before its location, using FNDECL's first statement (if any) to give the indentation, if appropriate. */ static void add_return_star_this_fixit (gcc_rich_location *richloc, tree fndecl) { location_t indent = UNKNOWN_LOCATION; tree stmts = expr_first (DECL_SAVED_TREE (fndecl)); if (stmts) indent = EXPR_LOCATION (stmts); richloc->add_fixit_insert_formatted ("return *this;", richloc->get_loc (), indent); } /* This function carries out the subset of finish_function operations needed to emit the compiler-generated outlined helper functions used by the coroutines implementation. */ static void emit_coro_helper (tree helper) { /* This is a partial set of the operations done by finish_function() plus emitting the result. */ set_cfun (NULL); current_function_decl = helper; begin_scope (sk_function_parms, NULL); store_parm_decls (DECL_ARGUMENTS (helper)); announce_function (helper); allocate_struct_function (helper, false); cfun->language = ggc_cleared_alloc<language_function> (); poplevel (1, 0, 1); maybe_save_function_definition (helper); /* We must start each function with a clear fold cache. 
*/
  clear_fold_cache ();
  cp_fold_function (helper);
  DECL_CONTEXT (DECL_RESULT (helper)) = helper;
  BLOCK_SUPERCONTEXT (DECL_INITIAL (helper)) = helper;
  /* This function has coroutine IFNs that we should handle in middle
     end lowering.  */
  cfun->coroutine_component = true;
  cp_genericize (helper);
  expand_or_defer_fn (helper);
}

/* Finish up a function declaration and compile that function
   all the way to assembler language output.  The free the storage
   for the function definition.  INLINE_P is TRUE if we just
   finished processing the body of an in-class inline function
   definition.  (This processing will have taken place after the
   class definition is complete.)  */

tree
finish_function (bool inline_p)
{
  tree fndecl = current_function_decl;
  tree fntype, ctype = NULL_TREE;
  /* Coroutine ramp outlining produces these two helper decls.  */
  tree resumer = NULL_TREE, destroyer = NULL_TREE;
  bool coro_p = flag_coroutines
                && !processing_template_decl
                && DECL_COROUTINE_P (fndecl);
  bool coro_emit_helpers = false;

  /* When we get some parse errors, we can end up without a
     current_function_decl, so cope.  */
  if (fndecl == NULL_TREE)
    return error_mark_node;

  if (!DECL_OMP_DECLARE_REDUCTION_P (fndecl))
    finish_lambda_scope ();

  if (c_dialect_objc ())
    objc_finish_function ();

  record_key_method_defined (fndecl);

  fntype = TREE_TYPE (fndecl);

  /*  TREE_READONLY (fndecl) = 1;
      This caused &foo to be of type ptr-to-const-function
      which then got a warning when stored in a ptr-to-function variable.  */

  gcc_assert (building_stmt_list_p ());
  /* The current function is being defined, so its DECL_INITIAL should
     be set, and unless there's a multiple definition, it should be
     error_mark_node.  */
  gcc_assert (DECL_INITIAL (fndecl) == error_mark_node);

  if (coro_p)
    {
      /* Only try to emit the coroutine outlined helper functions if the
         transforms succeeded.  Otherwise, treat errors in the same way as
         a regular function.  */
      coro_emit_helpers = morph_fn_to_coro (fndecl, &resumer, &destroyer);

      /* We should handle coroutine IFNs in middle end lowering.  */
      cfun->coroutine_component = true;

      /* Do not try to process the ramp's EH unless outlining succeeded.  */
      if (coro_emit_helpers && use_eh_spec_block (fndecl))
        finish_eh_spec_block (TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fndecl)),
                              current_eh_spec_block);
    }
  else
  /* For a cloned function, we've already got all the code we need;
     there's no need to add any extra bits.  */
  if (!DECL_CLONED_FUNCTION_P (fndecl))
    {
      /* Make it so that `main' always returns 0 by default.  */
      if (DECL_MAIN_P (current_function_decl))
        finish_return_stmt (integer_zero_node);

      if (use_eh_spec_block (current_function_decl))
        finish_eh_spec_block (TYPE_RAISES_EXCEPTIONS
                              (TREE_TYPE (current_function_decl)),
                              current_eh_spec_block);
    }

  /* If we're saving up tree structure, tie off the function now.  */
  DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));

  finish_fname_decls ();

  /* If this function can't throw any exceptions, remember that.  */
  if (!processing_template_decl
      && !cp_function_chain->can_throw
      && !flag_non_call_exceptions
      && !decl_replaceable_p (fndecl))
    TREE_NOTHROW (fndecl) = 1;

  /* This must come after expand_function_end because cleanups might
     have declarations (from inline functions) that need to go into
     this function's blocks.  */

  /* If the current binding level isn't the outermost binding level
     for this function, either there is a bug, or we have experienced
     syntax errors and the statement tree is malformed.  */
  if (current_binding_level->kind != sk_function_parms)
    {
      /* Make sure we have already experienced errors.  */
      gcc_assert (errorcount);

      /* Throw away the broken statement tree and extra binding levels.  */
      DECL_SAVED_TREE (fndecl) = alloc_stmt_list ();

      while (current_binding_level->kind != sk_function_parms)
        {
          if (current_binding_level->kind == sk_class)
            pop_nested_class ();
          else
            poplevel (0, 0, 0);
        }
    }
  poplevel (1, 0, 1);

  /* Statements should always be full-expressions at the outermost set
     of curly braces for a function.  */
  gcc_assert (stmts_are_full_exprs_p ());

  /* If there are no return statements in a function with auto return type,
     the return type is void.  But if the declared type is something like
     auto*, this is an error.  */
  if (!processing_template_decl && FNDECL_USED_AUTO (fndecl)
      && TREE_TYPE (fntype) == DECL_SAVED_AUTO_RETURN_TYPE (fndecl))
    {
      if (is_auto (DECL_SAVED_AUTO_RETURN_TYPE (fndecl))
          && !current_function_returns_value
          && !current_function_returns_null)
        {
          /* We haven't applied return type deduction because we haven't
             seen any return statements.  Do that now.  */
          tree node = type_uses_auto (DECL_SAVED_AUTO_RETURN_TYPE (fndecl));
          do_auto_deduction (DECL_SAVED_AUTO_RETURN_TYPE (fndecl),
                             void_node, node, tf_warning_or_error,
                             adc_return_type);

          apply_deduced_return_type (fndecl, void_type_node);
          fntype = TREE_TYPE (fndecl);
        }
      else if (!current_function_returns_value
               && !current_function_returns_null)
        {
          error ("no return statements in function returning %qT",
                 DECL_SAVED_AUTO_RETURN_TYPE (fndecl));
          inform (input_location, "only plain %<auto%> return type can be "
                  "deduced to %<void%>");
        }
    }

  /* Remember that we were in class scope.  */
  if (current_class_name)
    ctype = current_class_type;

  if (DECL_DELETED_FN (fndecl))
    {
      /* A deleted function has no body to keep; skip straight to the
         common exit path.  */
      DECL_INITIAL (fndecl) = error_mark_node;
      DECL_SAVED_TREE (fndecl) = NULL_TREE;
      goto cleanup;
    }

  // If this is a concept, check that the definition is reasonable.
  if (DECL_DECLARED_CONCEPT_P (fndecl))
    check_function_concept (fndecl);

  if (flag_openmp)
    if (tree attr = lookup_attribute ("omp declare variant base",
                                      DECL_ATTRIBUTES (fndecl)))
      omp_declare_variant_finalize (fndecl, attr);

  /* Lambda closure members are implicitly constexpr if possible.  */
  if (cxx_dialect >= cxx17
      && LAMBDA_TYPE_P (CP_DECL_CONTEXT (fndecl)))
    DECL_DECLARED_CONSTEXPR_P (fndecl)
      = ((processing_template_decl
          || is_valid_constexpr_fn (fndecl, /*complain*/false))
         && potential_constant_expression (DECL_SAVED_TREE (fndecl)));

  /* Save constexpr function body before it gets munged by
     the NRV transformation.  */
  maybe_save_function_definition (fndecl);

  /* Invoke the pre-genericize plugin before we start munging things.  */
  if (!processing_template_decl)
    invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);

  /* Perform delayed folding before NRV transformation.  */
  if (!processing_template_decl
      && !DECL_IMMEDIATE_FUNCTION_P (fndecl)
      && !DECL_OMP_DECLARE_REDUCTION_P (fndecl))
    cp_fold_function (fndecl);

  /* Set up the named return value optimization, if we can.  Candidate
     variables are selected in check_return_expr.  */
  if (current_function_return_value)
    {
      tree r = current_function_return_value;
      tree outer;

      if (r != error_mark_node
          /* This is only worth doing for fns that return in memory--and
             simpler, since we don't have to worry about promoted modes.  */
          && aggregate_value_p (TREE_TYPE (TREE_TYPE (fndecl)), fndecl)
          /* Only allow this for variables declared in the outer scope of
             the function so we know that their lifetime always ends with a
             return; see g++.dg/opt/nrv6.C.  We could be more flexible if
             we were to do this optimization in tree-ssa.  */
          && (outer = outer_curly_brace_block (fndecl))
          && chain_member (r, BLOCK_VARS (outer)))
        finalize_nrv (&DECL_SAVED_TREE (fndecl), r, DECL_RESULT (fndecl));

      current_function_return_value = NULL_TREE;
    }

  /* Must mark the RESULT_DECL as being in this function.  */
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* Set the BLOCK_SUPERCONTEXT of the outermost function scope to point
     to the FUNCTION_DECL node itself.  */
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;

  /* Complain if there's just no return statement.  */
  if (warn_return_type
      && !VOID_TYPE_P (TREE_TYPE (fntype))
      && !dependent_type_p (TREE_TYPE (fntype))
      && !current_function_returns_value && !current_function_returns_null
      /* Don't complain if we abort or throw.  */
      && !current_function_returns_abnormally
      /* Don't complain if there's an infinite loop.  */
      && !current_function_infinite_loop
      /* Don't complain if we are declared noreturn.  */
      && !TREE_THIS_VOLATILE (fndecl)
      && !DECL_NAME (DECL_RESULT (fndecl))
      && !TREE_NO_WARNING (fndecl)
      /* Structor return values (if any) are set by the compiler.  */
      && !DECL_CONSTRUCTOR_P (fndecl)
      && !DECL_DESTRUCTOR_P (fndecl)
      && targetm.warn_func_return (fndecl))
    {
      gcc_rich_location richloc (input_location);
      /* Potentially add a "return *this;" fix-it hint for
         assignment operators.  */
      if (IDENTIFIER_ASSIGN_OP_P (DECL_NAME (fndecl)))
        {
          tree valtype = TREE_TYPE (DECL_RESULT (fndecl));
          if (TREE_CODE (valtype) == REFERENCE_TYPE
              && current_class_ref
              && same_type_ignoring_top_level_qualifiers_p
                  (TREE_TYPE (valtype), TREE_TYPE (current_class_ref))
              && global_dc->option_enabled (OPT_Wreturn_type,
                                            global_dc->lang_mask,
                                            global_dc->option_state))
            add_return_star_this_fixit (&richloc, fndecl);
        }
      if (warning_at (&richloc, OPT_Wreturn_type,
                      "no return statement in function returning non-void"))
        TREE_NO_WARNING (fndecl) = 1;
    }

  /* Store the end of the function, so that we get good line number
     info for the epilogue.  */
  cfun->function_end_locus = input_location;

  /* Complain about parameters that are only set, but never otherwise
     used.  */
  if (warn_unused_but_set_parameter
      && !processing_template_decl
      && errorcount == unused_but_set_errorcount
      && !DECL_CLONED_FUNCTION_P (fndecl))
    {
      tree decl;

      for (decl = DECL_ARGUMENTS (fndecl); decl; decl = DECL_CHAIN (decl))
        if (TREE_USED (decl)
            && TREE_CODE (decl) == PARM_DECL
            && !DECL_READ_P (decl)
            && DECL_NAME (decl)
            && !DECL_ARTIFICIAL (decl)
            && !TREE_NO_WARNING (decl)
            && !DECL_IN_SYSTEM_HEADER (decl)
            && TREE_TYPE (decl) != error_mark_node
            && !TYPE_REF_P (TREE_TYPE (decl))
            && (!CLASS_TYPE_P (TREE_TYPE (decl))
                || !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (decl))))
          warning_at (DECL_SOURCE_LOCATION (decl),
                      OPT_Wunused_but_set_parameter,
                      "parameter %qD set but not used", decl);
      unused_but_set_errorcount = errorcount;
    }

  /* Complain about locally defined typedefs that are not used in this
     function.  */
  maybe_warn_unused_local_typedefs ();

  /* Possibly warn about unused parameters.  */
  if (warn_unused_parameter
      && !processing_template_decl
      && !DECL_CLONED_FUNCTION_P (fndecl))
    do_warn_unused_parameter (fndecl);

  /* Genericize before inlining.  */
  if (!processing_template_decl
      && !DECL_IMMEDIATE_FUNCTION_P (fndecl)
      && !DECL_OMP_DECLARE_REDUCTION_P (fndecl))
    cp_genericize (fndecl);

  /* Emit the resumer and destroyer functions now, providing that we have
     not encountered some fatal error.  */
  if (coro_emit_helpers)
    {
      emit_coro_helper (resumer);
      emit_coro_helper (destroyer);
    }

 cleanup:
  /* We're leaving the context of this function, so zap cfun.  It's still
     in DECL_STRUCT_FUNCTION, and we'll restore it in
     tree_rest_of_compilation.  */
  set_cfun (NULL);
  current_function_decl = NULL;

  /* If this is an in-class inline definition, we may have to pop the
     bindings for the template parameters that we added in
     maybe_begin_member_template_processing when start_function was
     called.  */
  if (inline_p)
    maybe_end_member_template_processing ();

  /* Leave the scope of the class.  */
  if (ctype)
    pop_nested_class ();

  --function_depth;

  /* Clean up.  */
  current_function_decl = NULL_TREE;

  invoke_plugin_callbacks (PLUGIN_FINISH_PARSE_FUNCTION, fndecl);
  return fndecl;
}

/* Create the FUNCTION_DECL for a function definition.
   DECLSPECS and DECLARATOR are the parts of the declaration;
   they describe the return type and the name of the function,
   but twisted together in a fashion that parallels the syntax of C.

   This function creates a binding context for the function body
   as well as setting up the FUNCTION_DECL in current_function_decl.

   Returns a FUNCTION_DECL on success.

   If the DECLARATOR is not suitable for a function (it defines a datum
   instead), we return 0, which tells yyparse to report a parse error.

   May return void_type_node indicating that this method is actually
   a friend.  See grokfield for more details.

   Came here with a `.pushlevel' .

   DO NOT MAKE ANY CHANGES TO THIS CODE WITHOUT MAKING CORRESPONDING
   CHANGES TO CODE IN `grokfield'.  */

tree
grokmethod (cp_decl_specifier_seq *declspecs,
            const cp_declarator *declarator, tree attrlist)
{
  tree fndecl = grokdeclarator (declarator, declspecs, MEMFUNCDEF, 0,
                                &attrlist);

  if (fndecl == error_mark_node)
    return error_mark_node;

  if (attrlist)
    cplus_decl_attributes (&fndecl, attrlist, 0);

  /* Pass friends other than inline friend functions back.  */
  if (fndecl == void_type_node)
    return fndecl;

  if (DECL_IN_AGGR_P (fndecl))
    {
      if (DECL_CLASS_SCOPE_P (fndecl))
        error ("%qD is already defined in class %qT", fndecl,
               DECL_CONTEXT (fndecl));
      return error_mark_node;
    }

  check_template_shadow (fndecl);

  /* In-class definitions are implicitly inline; no warning if the
     compiler later chooses not to inline them.  */
  if (TREE_PUBLIC (fndecl))
    DECL_COMDAT (fndecl) = 1;
  DECL_DECLARED_INLINE_P (fndecl) = 1;
  DECL_NO_INLINE_WARNING_P (fndecl) = 1;

  /* We process method specializations in finish_struct_1.  */
  if (processing_template_decl && !DECL_TEMPLATE_SPECIALIZATION (fndecl))
    {
      fndecl = push_template_decl (fndecl);
      if (fndecl == error_mark_node)
        return fndecl;
    }

  if (! DECL_FRIEND_P (fndecl))
    {
      if (DECL_CHAIN (fndecl))
        {
          fndecl = copy_node (fndecl);
          TREE_CHAIN (fndecl) = NULL_TREE;
        }
    }

  cp_finish_decl (fndecl, NULL_TREE, false, NULL_TREE, 0);

  DECL_IN_AGGR_P (fndecl) = 1;
  return fndecl;
}

/* VAR is a VAR_DECL.  If its type is incomplete, remember VAR so that
   we can lay it out later, when and if its type becomes complete.

   Also handle constexpr variables where the initializer involves
   an unlowered PTRMEM_CST because the class isn't complete yet.  */

void
maybe_register_incomplete_var (tree var)
{
  gcc_assert (VAR_P (var));

  /* Keep track of variables with incomplete types.  */
  if (!processing_template_decl && TREE_TYPE (var) != error_mark_node
      && DECL_EXTERNAL (var))
    {
      tree inner_type = TREE_TYPE (var);

      /* For arrays (of arrays ...), the element type decides
         completeness.  */
      while (TREE_CODE (inner_type) == ARRAY_TYPE)
        inner_type = TREE_TYPE (inner_type);
      inner_type = TYPE_MAIN_VARIANT (inner_type);

      if ((!COMPLETE_TYPE_P (inner_type) && CLASS_TYPE_P (inner_type))
          /* RTTI TD entries are created while defining the type_info.  */
          || (TYPE_LANG_SPECIFIC (inner_type)
              && TYPE_BEING_DEFINED (inner_type)))
        {
          incomplete_var iv = {var, inner_type};
          vec_safe_push (incomplete_vars, iv);
        }
      else if (!(DECL_LANG_SPECIFIC (var) && DECL_TEMPLATE_INFO (var))
               && decl_constant_var_p (var)
               && (TYPE_PTRMEM_P (inner_type) || CLASS_TYPE_P (inner_type)))
        {
          /* When the outermost open class is complete we can resolve any
             pointers-to-members.  */
          tree context = outermost_open_class ();
          incomplete_var iv = {var, context};
          vec_safe_push (incomplete_vars, iv);
        }
    }
}

/* Called when a class type (given by TYPE) is defined.  If there
   are any existing VAR_DECLs whose type has been completed by this
   declaration, update them now.
*/

void
complete_vars (tree type)
{
  unsigned ix;
  incomplete_var *iv;

  /* No ix++ in the for-header: unordered_remove replaces slot IX with the
     last element, so IX must be revisited after a removal.  */
  for (ix = 0; vec_safe_iterate (incomplete_vars, ix, &iv); )
    {
      if (same_type_p (type, iv->incomplete_type))
        {
          tree var = iv->decl;
          tree type = TREE_TYPE (var);

          if (type != error_mark_node
              && (TYPE_MAIN_VARIANT (strip_array_types (type))
                  == iv->incomplete_type))
            {
              /* Complete the type of the variable.  The VAR_DECL itself
                 will be laid out in expand_expr.  */
              complete_type (type);
              cp_apply_type_quals_to_decl (cp_type_quals (type), var);
            }

          /* Remove this entry from the list.  */
          incomplete_vars->unordered_remove (ix);
        }
      else
        ix++;
    }

  /* Check for pending declarations which may have abstract type.  */
  complete_type_check_abstract (type);
}

/* If DECL is of a type which needs a cleanup, build and return an
   expression to perform that cleanup here.  Return NULL_TREE if no
   cleanup need be done.  DECL can also be a _REF when called from
   split_nonconstant_init_1.  */

tree
cxx_maybe_build_cleanup (tree decl, tsubst_flags_t complain)
{
  tree type;
  tree attr;
  tree cleanup;

  /* Assume no cleanup is required.  */
  cleanup = NULL_TREE;

  if (error_operand_p (decl))
    return cleanup;

  /* Handle "__attribute__((cleanup))".  We run the cleanup function
     before the destructor since the destructor is what actually
     terminates the lifetime of the object.  */
  if (DECL_P (decl))
    attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
  else
    attr = NULL_TREE;
  if (attr)
    {
      tree id;
      tree fn;
      tree arg;

      /* Get the name specified by the user for the cleanup function.  */
      id = TREE_VALUE (TREE_VALUE (attr));
      /* Look up the name to find the cleanup function to call.  It is
         important to use lookup_name here because that is what is
         used in c-common.c:handle_cleanup_attribute when performing
         initial checks on the attribute.  Note that those checks
         include ensuring that the function found is not an overloaded
         function, or an object with an overloaded call operator,
         etc.; we can rely on the fact that the function found is an
         ordinary FUNCTION_DECL.  */
      fn = lookup_name (id);
      arg = build_address (decl);
      if (!mark_used (decl, complain) && !(complain & tf_error))
        return error_mark_node;
      cleanup = cp_build_function_call_nary (fn, complain, arg, NULL_TREE);
      if (cleanup == error_mark_node)
        return error_mark_node;
    }
  /* Handle ordinary C++ destructors.  */
  type = TREE_TYPE (decl);
  if (type_build_dtor_call (type))
    {
      int flags = LOOKUP_NORMAL|LOOKUP_NONVIRTUAL|LOOKUP_DESTRUCTOR;
      tree addr;
      tree call;

      if (TREE_CODE (type) == ARRAY_TYPE)
        addr = decl;
      else
        addr = build_address (decl);

      call = build_delete (input_location, TREE_TYPE (addr), addr,
                           sfk_complete_destructor, flags, 0, complain);
      if (call == error_mark_node)
        cleanup = error_mark_node;
      else if (TYPE_HAS_TRIVIAL_DESTRUCTOR (type))
        /* Discard the call.  */;
      else if (decl_maybe_constant_destruction (decl, type)
               && DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
        cxx_constant_dtor (call, decl);
      else if (cleanup)
        /* Run the attribute-cleanup first, then the destructor.  */
        cleanup = cp_build_compound_expr (cleanup, call, complain);
      else
        cleanup = call;
    }

  /* build_delete sets the location of the destructor call to the
     current location, even though the destructor is going to be
     called later, at the end of the current scope.  This can lead to
     a "jumpy" behavior for users of debuggers when they step around
     the end of the block.  So let's unset the location of the
     destructor call instead.  */
  protected_set_expr_location (cleanup, UNKNOWN_LOCATION);
  if (cleanup && CONVERT_EXPR_P (cleanup))
    protected_set_expr_location (TREE_OPERAND (cleanup, 0),
                                 UNKNOWN_LOCATION);

  if (cleanup
      && DECL_P (decl)
      && !lookup_attribute ("warn_unused",
                            TYPE_ATTRIBUTES (TREE_TYPE (decl)))
      /* Treat objects with destructors as used; the destructor may do
         something substantive.  */
      && !mark_used (decl, complain) && !(complain & tf_error))
    return error_mark_node;

  if (cleanup && cfun && !processing_template_decl
      && !expr_noexcept_p (cleanup, tf_none))
    cp_function_chain->throwing_cleanup = true;

  return cleanup;
}

/* Return the FUNCTION_TYPE that corresponds to MEMFNTYPE, which can be a
   FUNCTION_DECL, METHOD_TYPE, FUNCTION_TYPE, pointer or reference to
   METHOD_TYPE or FUNCTION_TYPE, or pointer to member function.  */

tree
static_fn_type (tree memfntype)
{
  tree fntype;
  tree args;

  if (TYPE_PTRMEMFUNC_P (memfntype))
    memfntype = TYPE_PTRMEMFUNC_FN_TYPE (memfntype);
  if (INDIRECT_TYPE_P (memfntype)
      || TREE_CODE (memfntype) == FUNCTION_DECL)
    memfntype = TREE_TYPE (memfntype);
  if (TREE_CODE (memfntype) == FUNCTION_TYPE)
    return memfntype;
  gcc_assert (TREE_CODE (memfntype) == METHOD_TYPE);
  /* Drop the implicit `this' argument (the head of the METHOD_TYPE
     argument list) and rebuild as a plain FUNCTION_TYPE, preserving
     cv-quals, attributes and language qualifiers.  */
  args = TYPE_ARG_TYPES (memfntype);
  fntype = build_function_type (TREE_TYPE (memfntype), TREE_CHAIN (args));
  fntype = apply_memfn_quals (fntype, type_memfn_quals (memfntype));
  fntype = (cp_build_type_attribute_variant
            (fntype, TYPE_ATTRIBUTES (memfntype)));
  fntype = cxx_copy_lang_qualifiers (fntype, memfntype);
  return fntype;
}

/* DECL was originally constructed as a non-static member function,
   but turned out to be static.  Update it accordingly.  */

void
revert_static_member_fn (tree decl)
{
  tree stype = static_fn_type (decl);
  cp_cv_quals quals = type_memfn_quals (stype);
  cp_ref_qualifier rqual = type_memfn_rqual (stype);

  /* A static member function cannot be cv- or ref-qualified.  */
  if (quals != TYPE_UNQUALIFIED || rqual != REF_QUAL_NONE)
    stype = apply_memfn_quals (stype, TYPE_UNQUALIFIED, REF_QUAL_NONE);

  TREE_TYPE (decl) = stype;

  /* Drop the `this' parameter from the argument chain.  */
  if (DECL_ARGUMENTS (decl))
    DECL_ARGUMENTS (decl) = DECL_CHAIN (DECL_ARGUMENTS (decl));
  DECL_STATIC_FUNCTION_P (decl) = 1;
}

/* Return which tree structure is used by T, or TS_CP_GENERIC
   if T is one of the language-independent trees.  */
enum cp_tree_node_structure_enum
cp_tree_node_structure (union lang_tree_node * t)
{
  switch (TREE_CODE (&t->generic))
    {
    case ARGUMENT_PACK_SELECT:  return TS_CP_ARGUMENT_PACK_SELECT;
    case BASELINK:              return TS_CP_BASELINK;
    case CONSTRAINT_INFO:       return TS_CP_CONSTRAINT_INFO;
    case DEFERRED_NOEXCEPT:     return TS_CP_DEFERRED_NOEXCEPT;
    case DEFERRED_PARSE:        return TS_CP_DEFERRED_PARSE;
    case IDENTIFIER_NODE:       return TS_CP_IDENTIFIER;
    case LAMBDA_EXPR:           return TS_CP_LAMBDA_EXPR;
    case OVERLOAD:              return TS_CP_OVERLOAD;
    case PTRMEM_CST:            return TS_CP_PTRMEM;
    case STATIC_ASSERT:         return TS_CP_STATIC_ASSERT;
    case TEMPLATE_DECL:         return TS_CP_TEMPLATE_DECL;
    case TEMPLATE_INFO:         return TS_CP_TEMPLATE_INFO;
    case TEMPLATE_PARM_INDEX:   return TS_CP_TPI;
    case TRAIT_EXPR:            return TS_CP_TRAIT_EXPR;
    case USERDEF_LITERAL:       return TS_CP_USERDEF_LITERAL;
    default:                    return TS_CP_GENERIC;
    }
}

/* Build the void_list_node (void_type_node having been created).  */

tree
build_void_list_node (void)
{
  tree t = build_tree_list (NULL_TREE, void_type_node);
  return t;
}

bool
cp_missing_noreturn_ok_p (tree decl)
{
  /* A missing noreturn is ok for the `main' function.  */
  return DECL_MAIN_P (decl);
}

/* Return the decl used to identify the COMDAT group into which DECL should
   be placed.  */

tree
cxx_comdat_group (tree decl)
{
  /* Virtual tables, construction virtual tables, and virtual table
     tables all go in a single COMDAT group, named after the primary
     virtual table.  */
  if (VAR_P (decl) && DECL_VTABLE_OR_VTT_P (decl))
    decl = CLASSTYPE_VTABLES (DECL_CONTEXT (decl));
  /* For all other DECLs, the COMDAT group is the mangled name of the
     declaration itself.  */
  else
    {
      while (DECL_THUNK_P (decl))
        {
          /* If TARGET_USE_LOCAL_THUNK_ALIAS_P, use_thunk puts the thunk
             into the same section as the target function.  In that case
             we must return target's name.  */
          tree target = THUNK_TARGET (decl);
          if (TARGET_USE_LOCAL_THUNK_ALIAS_P (target)
              && DECL_SECTION_NAME (target) != NULL
              && DECL_ONE_ONLY (target))
            decl = target;
          else
            break;
        }
    }

  return decl;
}

/* Returns the return type for FN as written by the user, which may include
   a placeholder for a deduced return type.  */

tree
fndecl_declared_return_type (tree fn)
{
  fn = STRIP_TEMPLATE (fn);
  if (FNDECL_USED_AUTO (fn))
    return DECL_SAVED_AUTO_RETURN_TYPE (fn);
  return TREE_TYPE (TREE_TYPE (fn));
}

/* Returns true iff DECL is a variable or function declared with an auto
   type that has not yet been deduced to a real type.  */

bool
undeduced_auto_decl (tree decl)
{
  /* `auto' deduction only exists from C++11 onwards.  */
  if (cxx_dialect < cxx11)
    return false;
  STRIP_ANY_LOCATION_WRAPPER (decl);
  return ((VAR_OR_FUNCTION_DECL_P (decl)
           || TREE_CODE (decl) == TEMPLATE_DECL)
          && type_uses_auto (TREE_TYPE (decl)));
}

/* Complain if DECL has an undeduced return type.  */

bool
require_deduced_type (tree decl, tsubst_flags_t complain)
{
  if (undeduced_auto_decl (decl))
    {
      if (TREE_NO_WARNING (decl) && seen_error ())
        /* We probably already complained about deduction failure.  */;
      else if (complain & tf_error)
        error ("use of %qD before deduction of %<auto%>", decl);
      return false;
    }
  return true;
}

/* Create a representation of the explicit-specifier with
   constant-expression of EXPR.  COMPLAIN is as for tsubst.  */

tree
build_explicit_specifier (tree expr, tsubst_flags_t complain)
{
  if (instantiation_dependent_expression_p (expr))
    /* Wait for instantiation, tsubst_function_decl will handle it.  */
    return expr;

  expr = build_converted_constant_bool_expr (expr, complain);
  expr = instantiate_non_dependent_expr_sfinae (expr, complain);
  expr = cxx_constant_value (expr);
  return expr;
}

#include "gt-cp-decl.h"
/* ==== file boundary: GB_binop__min_int16.c ==== */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): hand edits here will be overwritten by the generator.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__min_int16
// A.*B function (eWiseMult):       GB_AemultB__min_int16
// A*D function (colscale):         GB_AxD__min_int16
// D*A function (rowscale):         GB_DxB__min_int16
// C+=B function (dense accum):     GB_Cdense_accumB__min_int16
// C+=b function (dense accum):     GB_Cdense_accumb__min_int16
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__min_int16
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__min_int16
// C=scalar+B                       GB_bind1st__min_int16
// C=scalar+B'                      GB_bind1st_tran__min_int16
// C=A+scalar                       GB_bind2nd__min_int16
// C=A'+scalar                      GB_bind2nd_tran__min_int16

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_IMIN (aij, bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (GB_IMIN: integer minimum — defined in the GB.h headers)
#define GB_BINOP(z, x, y) \
    z = GB_IMIN (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MIN || GxB_NO_INT16 || GxB_NO_MIN_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // body is a generated template; the types/macros above specialize it
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__min_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned.
    // Harmless generator artifact; kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__min_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t  x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t bij = Bx [p] ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__min_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t  y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMIN (x, aij) ; \
}

GrB_Info GB_bind1st_tran__min_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMIN (aij, y) ; \
}

GrB_Info GB_bind2nd_tran__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== file boundary: GB_binop__bget_int8.c ==== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bget_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__bget_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__bget_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__bget_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bget_int8)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bget_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bget_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bget_int8)
// C=scalar+B                       GB (_bind1st__bget_int8)
// C=scalar+B'                      GB (_bind1st_tran__bget_int8)
// C=A+scalar                       GB (_bind2nd__bget_int8)
// C=A'+scalar                      GB (_bind2nd_tran__bget_int8)

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITGET (aij, bij, int8_t, 8)

// The macros below are consumed by the template files included later in
// this translation unit; they specialize the generic algorithms to the
// BGET operator on int8 operands.

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITGET (x, y, int8_t, 8) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_INT8 || GxB_NO_BGET_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (disabled: BGET is not one of the ops that supports dense ewise3 accum)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bget_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bget_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bget_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns);
    // kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// (disabled: BGET is not a built-in semiring multiply operator)
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t
*A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bget_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces allocated here are released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bget_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bget_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bget_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bget_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = GB_BITGET (x, Bx [p]) for every entry present in the bitmap Bb
// (Bb == NULL means all entries are present).
GrB_Info GB (_bind1st__bget_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p
= 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = GB_BITGET (Ax [p], y) for every entry present in the bitmap Ab.
GrB_Info GB (_bind2nd__bget_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_BITGET (x, aij, int8_t, 8) ;   \
}

GrB_Info GB (_bind1st_tran__bget_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_BITGET (aij, y, int8_t, 8) ;   \
}

GrB_Info GB (_bind2nd_tran__bget_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mosaic.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include "common.h" /** abritrary maximum number of colours */ #define MAXCOLOURS 24 /** the colours */ static pixel_t colours[MAXCOLOURS]; static size_t ncolours = 0; static unsigned char distrib[256]; /** used to add a new colour */ void mosaic_addColour( unsigned char r, unsigned char g, unsigned char b) { pixel_t colour = { r, g, b }; if(ncolours < MAXCOLOURS) { colours[ncolours] = colour; ncolours++; } else { fprintf(stderr, "Too many colours!\n"); abort(); } } static inline void _process(pixel_t pin, pixel_t* p) { size_t i = 0; int med = (float)(GET(pin, RC_R) + GET(pin, RC_G) + GET(pin, RC_B) ) / 3.f; *p = colours[distrib[med]]; } typedef struct { size_t i; img_t in, out; } tdata_t; static void _tprocess(void* data) { tdata_t* mydata = (tdata_t*)data; size_t j; for(j = 0; j < mydata->in.w; ++j) { _process(A(mydata->in, mydata->i, j), &A(mydata->out, mydata->i, j)); } free(mydata); } static void _detdist(img_t img) { int cls[256]; size_t i, j; int thresh, current, count; for(i = 0; i < 256; ++i) { cls[i] = 0; } thresh = 0; for(i = 0; i < img.h; ++i) { for(j = 0; j < img.w; ++j) { pixel_t p = A(img, i, j); int med = (float)( GET(p, RC_R) + GET(p, RC_G) + GET(p, RC_B) ) / 3.f; thresh += cls[med] == 0; cls[med] = 1; } } thresh /= ncolours; current = count = 0; for(i = 0; i < 256; ++i) { count += (cls[i] > 0); if(count > thresh) { ++current; count = 0; } distrib[i] = current; } } /** apply a colour transformation based on relationships between colour components (RGB) rules are added with recolour_addRule */ img_t mosaic(img_t const img) { img_t ret = { img.w, img.h, (pixel_t*)malloc(img.w * img.h * sizeof(pixel_t)) }; size_t i, j; // determine distribution _detdist(img); // process #pragma omp parallel for for(i = 0; i < img.h; ++i) { tdata_t* data = (tdata_t*)malloc(sizeof(tdata_t)); data->i = i; data->in = img; data->out = ret; _tprocess(data); } return ret; }
12.c
/* Модифицируйте задачу 1 так, что бы потоки распечатывали свои идентификаторы в обратном порядке. Существует как минимум 5 способов решения. Постарайтесь найти как можно больше. */ #include <stdio.h> #include <omp.h> int main(int argc, char *argv[]) { int threads = 8; omp_set_num_threads(threads); #pragma omp parallel shared(threads) { for (;;) { if (omp_get_thread_num() + 1 == threads) { printf("Hello World! Это тред номер %d, всего таких нас %d\n", omp_get_thread_num(), omp_get_num_threads()); threads--; break; } } } }
search.h
// -*- C++ -*- // Copyright (C) 2007-2015 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/search.h * @brief Parallel implementation base for std::search() and * std::search_n(). * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_SEARCH_H #define _GLIBCXX_PARALLEL_SEARCH_H 1 #include <bits/stl_algobase.h> #include <parallel/parallel.h> #include <parallel/equally_split.h> namespace __gnu_parallel { /** * @brief Precalculate __advances for Knuth-Morris-Pratt algorithm. * @param __elements Begin iterator of sequence to search for. * @param __length Length of sequence to search for. * @param __off Returned __offsets. 
   */
  template<typename _RAIter, typename _DifferenceTp>
    void
    __calc_borders(_RAIter __elements, _DifferenceTp __length,
                   _DifferenceTp* __off)
    {
      typedef _DifferenceTp _DifferenceType;

      // Knuth-Morris-Pratt failure function: __off[j] is the length of
      // the longest proper border of the first j pattern elements
      // (−1 marks "no border" for the empty prefix).
      __off[0] = -1;
      if (__length > 1)
        __off[1] = 0;
      _DifferenceType __k = 0;
      for (_DifferenceType __j = 2; __j <= __length; __j++)
        {
          // fall back through shorter borders until one can be extended
          while ((__k >= 0) && !(__elements[__k] == __elements[__j-1]))
            __k = __off[__k];
          __off[__j] = ++__k;
        }
    }

  // Generic parallel find algorithm (requires random access iterator).

  /** @brief Parallel std::search.
   *  @param __begin1 Begin iterator of first sequence.
   *  @param __end1 End iterator of first sequence.
   *  @param __begin2 Begin iterator of second sequence.
   *  @param __end2 End iterator of second sequence.
   *  @param __pred Find predicate.
   *  @return Place of finding in first sequences. */
  template<typename __RAIter1, typename __RAIter2, typename _Pred>
    __RAIter1
    __search_template(__RAIter1 __begin1, __RAIter1 __end1,
                      __RAIter2 __begin2, __RAIter2 __end2, _Pred __pred)
    {
      typedef std::iterator_traits<__RAIter1> _TraitsType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _GLIBCXX_CALL((__end1 - __begin1) + (__end2 - __begin2));

      _DifferenceType __pattern_length = __end2 - __begin2;

      // Pattern too short.
      if(__pattern_length <= 0)
        return __end1;

      // Last point to start search.
      _DifferenceType __input_length = (__end1 - __begin1) - __pattern_length;

      // Where is first occurrence of pattern? defaults to end.
      _DifferenceType __result = (__end1 - __begin1);
      _DifferenceType *__splitters;

      // Pattern too long.
      if (__input_length < 0)
        return __end1;

      // protects updates of __result below
      omp_lock_t __result_lock;
      omp_init_lock(&__result_lock);

      _ThreadIndex __num_threads = std::max<_DifferenceType>
        (1, std::min<_DifferenceType>(__input_length, __get_max_threads()));

      // KMP shift table for the pattern (VLA: GNU extension in C++)
      _DifferenceType __advances[__pattern_length];
      __calc_borders(__begin2, __pattern_length, __advances);

#     pragma omp parallel num_threads(__num_threads)
      {
#       pragma omp single
        {
          __num_threads = omp_get_num_threads();
          __splitters = new _DifferenceType[__num_threads + 1];
          __equally_split(__input_length, __num_threads, __splitters);
        }

        _ThreadIndex __iam = omp_get_thread_num();

        // this thread scans candidate start positions [__start, __stop]
        _DifferenceType __start = __splitters[__iam],
                        __stop = __splitters[__iam + 1];

        _DifferenceType __pos_in_pattern = 0;
        bool __found_pattern = false;

        while (__start <= __stop && !__found_pattern)
          {
            // Get new value of result.
#           pragma omp flush(__result)
            // No chance for this thread to find first occurrence.
            if (__result < __start)
              break;
            while (__pred(__begin1[__start + __pos_in_pattern],
                          __begin2[__pos_in_pattern]))
              {
                ++__pos_in_pattern;
                if (__pos_in_pattern == __pattern_length)
                  {
                    // Found new candidate for result.
                    omp_set_lock(&__result_lock);
                    __result = std::min(__result, __start);
                    omp_unset_lock(&__result_lock);

                    __found_pattern = true;
                    break;
                  }
              }
            // Make safe jump.
            __start += (__pos_in_pattern - __advances[__pos_in_pattern]);
            __pos_in_pattern = (__advances[__pos_in_pattern] < 0
                                  ? 0 : __advances[__pos_in_pattern]);
          }
      } //parallel

      omp_destroy_lock(&__result_lock);

      delete[] __splitters;

      // Return iterator on found element.
      return (__begin1 + __result);
    }
} // end namespace

#endif /* _GLIBCXX_PARALLEL_SEARCH_H */
GB_unop__identity_int8_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__identity_int8_uint32)
// op(A') function: GB (_unop_tran__identity_int8_uint32)

// C type:  int8_t
// A type:  uint32_t
// cast:    int8_t cij = (int8_t) aij
// unaryop: cij = aij

// macros consumed by the included transpose/apply templates
#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = (int8_t) aij ;       \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cast each uint32_t entry of Ax to int8_t (truncating) and store it in Cx.
GrB_Info GB (_unop_apply__identity_int8_uint32)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int8_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pngquant.c
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** © 2009-2019 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ char *PNGQUANT_USAGE = "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file destination file path to use instead of --ext (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower-precision color (e.g. for ARGB4444 output)\n\ --strip remove optional metadata (default on Mac)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ If you pass the special output path \"-\" and a single input file, that file\n\ will be processed and the quantized image will go to stdout.\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. 
See man page for full list of options.\n"; #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <math.h> #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) #include <fcntl.h> /* O_BINARY */ #include <io.h> /* setmode() */ #include <locale.h> /* UTF-8 locale */ #else #include <unistd.h> #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "libimagequant.h" /* if it fails here, run: git submodule update; ./configure; or add -Ilib to compiler flags */ #include "pngquant_opts.h" char *PNGQUANT_VERSION = LIQ_VERSION_STRING " (May 2021)"; static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(liq_attr *liq, struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); #if defined(_MSC_VER) char *buf = malloc(required_space); #else char buf[required_space]; #endif va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(liq, buf, context->log_callback_user_info); #if defined(_MSC_VER) free(buf); #endif } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif void pngquant_internal_print_config(FILE *fd) { fputs("" #ifndef NDEBUG " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__)) " SSE acceleration disabled.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , fd); fflush(fd); } FILE *pngquant_c_stderr() { return stderr; } FILE *pngquant_c_stdout() { return stdout; } static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n", PNGQUANT_VERSION); pngquant_internal_print_config(fd); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE 
*fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = no worse than N, perfect if possible (same as N-100) * * where N,M are numbers between 0 (lousy) and 100 (perfect) */ static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit) { long limit, target; const char *str = quality; char *end; long t1 = strtol(str, &end, 10); if (str == end) return false; str = end; if ('\0' == end[0] && t1 < 0) { // quality="-%d" target = -t1; limit = 0; } else if ('\0' == end[0]) { // quality="%d" target = t1; limit = t1*9/10; } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-" target = 100; limit = t1; } else { // quality="%d-%d" long t2 = strtol(str, &end, 10); if (str == end || t2 > 0) return false; target = -t2; limit = t1; } *min_quality_limit = (limit > 0); return LIQ_OK == liq_set_quality(options, limit, target); } pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq); static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq); #ifndef PNGQUANT_NO_MAIN int main(int argc, char *argv[]) { struct pngquant_options options = { .floyd = 1.f, // floyd-steinberg dithering .strip = false, }; pngquant_error retval = pngquant_parse_options(argc, argv, &options); if (retval != SUCCESS) { return retval; } if (options.print_version) { puts(PNGQUANT_VERSION); return SUCCESS; } if (options.missing_arguments) { print_full_version(stderr); print_usage(stderr); return MISSING_ARGUMENT; } if (options.print_help) { print_full_version(stdout); print_usage(stdout); return SUCCESS; } #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setlocale(LC_ALL, ".65001"); // issue #376; set UTF-8 for Unicode filenames #endif liq_attr *liq = liq_attr_create(); if (!liq) { fputs("SSE-capable CPU is required for 
this build.\n", stderr); return WRONG_ARCHITECTURE; } if (options.quality && !parse_quality(options.quality, liq, &options.min_quality_limit)) { fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr); return INVALID_ARGUMENT; } if (options.iebug) { // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0. liq_set_min_opacity(liq, 238); fputs(" warning: the workaround for IE6 is deprecated\n", stderr); } if (options.verbose) { liq_set_log_callback(liq, log_callback, NULL); options.log_callback = log_callback; } if (options.last_index_transparent) { liq_set_last_index_transparent(liq, true); } if (options.speed >= 10) { options.fast_compression = true; if (options.speed == 11) { options.floyd = 0; options.speed = 10; } } if (options.speed && LIQ_OK != liq_set_speed(liq, options.speed)) { fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr); return INVALID_ARGUMENT; } if (options.colors && LIQ_OK != liq_set_max_colors(liq, options.colors)) { fputs("Number of colors must be between 2 and 256.\n", stderr); return INVALID_ARGUMENT; } if (options.posterize && LIQ_OK != liq_set_min_posterization(liq, options.posterize)) { fputs("Posterization should be number of bits in range 0-4.\n", stderr); return INVALID_ARGUMENT; } if (options.extension && options.output_file_path) { fputs("--ext and --output options can't be used at the same time\n", stderr); return INVALID_ARGUMENT; } // new filename extension depends on options used. Typically basename-fs8.png if (options.extension == NULL) { options.extension = options.floyd > 0 ? "-fs8.png" : "-or8.png"; } if (options.output_file_path && options.num_files != 1) { fputs(" error: Only one input file is allowed when --output is used. 
This error also happens when filenames with spaces are not in quotes.\n", stderr);
        return INVALID_ARGUMENT;
    }
    if (options.using_stdout && !options.using_stdin && options.num_files != 1) {
        fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (!options.num_files && !options.using_stdin) {
        fputs("No input files specified.\n", stderr);
        if (options.verbose) {
            print_full_version(stderr);
        }
        print_usage(stderr);
        return MISSING_ARGUMENT;
    }

    retval = pngquant_main_internal(&options, liq);

    liq_attr_destroy(liq);

    return retval;
}
#endif

// Don't use this. This is not a public API.
/* Batch driver: optionally loads a fixed palette from options->map_file,
 * then quantizes every input file (in parallel under OpenMP).
 * Returns the most recently recorded per-file error, or SUCCESS. */
pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq)
{
    if (options->map_file) {
        /* Extract the palette of the map image by quantizing it once, then
         * pin those exact colors as fixed entries for all subsequent remaps. */
        png24_image tmp = {.width=0};
        if (SUCCESS != read_image(liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) {
            fprintf(stderr, " error: unable to load %s", options->map_file);
            return INVALID_ARGUMENT;
        }
        liq_result *tmp_quantize = liq_quantize_image(liq, options->fixed_palette_image);
        const liq_palette *pal = liq_get_palette(tmp_quantize);
        if (!pal) {
            fprintf(stderr, " error: unable to read colors from %s", options->map_file);
            return INVALID_ARGUMENT;
        }
        for(unsigned int i=0; i < pal->count; i++) {
            liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]);
        }
        liq_result_destroy(tmp_quantize);
    }

#ifdef _OPENMP
    // if there's a lot of files, coarse parallelism can be used
    if (options->num_files > 2*omp_get_max_threads()) {
        omp_set_nested(0);
        omp_set_dynamic(1);
    } else {
        omp_set_nested(1);
    }
#endif

    unsigned int error_count=0, skipped_count=0, file_count=0;
    pngquant_error latest_error=SUCCESS;

    #pragma omp parallel for \
        schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error)
    for(int i=0; i < options->num_files; i++) {
        const char *filename = options->using_stdin ? "stdin" : options->files[i];

        /* per-iteration copies so per-file settings can diverge safely across threads */
        struct pngquant_options opts = *options;
        liq_attr *local_liq = liq_attr_copy(liq);

#ifdef _OPENMP
        /* Buffer log lines per thread so parallel output doesn't interleave. */
        struct buffered_log buf = {0};
        if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) {
            liq_set_log_callback(local_liq, log_callback_buferred, &buf);
            liq_set_log_flush_callback(local_liq, log_callback_buferred_flush, &buf);
            opts.log_callback = log_callback_buferred;
            opts.log_callback_user_info = &buf;
        }
#endif

        pngquant_error retval = SUCCESS;

        const char *outname = opts.output_file_path;
        char *outname_free = NULL;
        if (!opts.using_stdout) {
            if (!outname) {
                /* derive "<input>-fs8.png"-style output name; freed below */
                outname = outname_free = add_filename_extension(filename, opts.extension);
            }
            if (!opts.force && file_exists(outname)) {
                fprintf(stderr, " error: '%s' exists; not overwriting\n", outname);
                retval = NOT_OVERWRITING_ERROR;
            }
        }

        if (SUCCESS == retval) {
            retval = pngquant_file_internal(filename, outname, &opts, local_liq);
        }

        free(outname_free);
        liq_attr_destroy(local_liq);

        if (retval) {
            #pragma omp critical
            {
                latest_error = retval;
            }
            /* quality/size skips are reported separately from hard errors */
            if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) {
                skipped_count++;
            } else {
                error_count++;
            }
        }
        ++file_count;
    }

    if (error_count) {
        verbose_printf(liq, options, "There were errors quantizing %d file%s out of a total of %d file%s.",
                       error_count, (error_count == 1)? "" : "s",
                       file_count, (file_count == 1)? "" : "s");
    }
    if (skipped_count) {
        verbose_printf(liq, options, "Skipped %d file%s out of a total of %d file%s.",
                       skipped_count, (skipped_count == 1)? "" : "s",
                       file_count, (file_count == 1)? "" : "s");
    }
    if (!skipped_count && !error_count) {
        verbose_printf(liq, options, "Quantized %d image%s.",
                       file_count, (file_count == 1)? "" : "s");
    }

    if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image);

    return latest_error;
}

/// Don't hack this. Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c
/* Quantizes a single file: read -> quantize -> remap -> write. */
static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq)
{
    pngquant_error retval = SUCCESS;

    verbose_printf(liq, options, "%s:", filename);

    liq_image *input_image = NULL;
    png24_image input_image_rwpng = {.width=0};
    bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout
    if (SUCCESS == retval) {
        retval = read_image(liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose);
    }

    int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap
    png8_image output_image = {.width=0};
    if (SUCCESS == retval) {
        verbose_printf(liq, options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL);

        /* report which color-management path the decoder took */
        if (RWPNG_ICCP == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " used embedded ICC profile to transform image to sRGB colorspace");
        } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " used gAMA and cHRM chunks to transform image to sRGB colorspace");
        } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " warning: ignored ICC profile in GRAY colorspace");
        } else if (RWPNG_COCOA == input_image_rwpng.input_color) {
            // No comment
        } else if (RWPNG_SRGB == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " passing sRGB tag from the input");
        } else if (input_image_rwpng.gamma != 0.45455) {
            verbose_printf(liq, options, " converted image from gamma %2.1f to gamma 2.2", 1.0/input_image_rwpng.gamma);
        }

        // when using image as source of a fixed palette the palette is extracted using regular quantization
        liq_result *remap;
        liq_error remap_error = liq_image_quantize(options->fixed_palette_image ?
options->fixed_palette_image : input_image, liq, &remap);

        if (LIQ_OK == remap_error) {
            // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2
            // NB: can't change gamma here, because output_color is allowed to be an sRGB tag
            liq_set_output_gamma(remap, 0.45455);
            liq_set_dithering_level(remap, options->floyd);

            retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image);
            if (SUCCESS == retval) {
                if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) {
                    retval = OUT_OF_MEMORY_ERROR;
                }

                set_palette(remap, &output_image);

                /* negative error means quality was not measured */
                double palette_error = liq_get_quantization_error(remap);
                if (palette_error >= 0) {
                    quality_percent = liq_get_quantization_quality(remap);
                    verbose_printf(liq, options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent);
                }
            }
            liq_result_destroy(remap);
        } else if (LIQ_QUALITY_TOO_LOW == remap_error) {
            retval = TOO_LOW_QUALITY;
        } else {
            retval = INVALID_ARGUMENT; // dunno
        }
    }

    if (SUCCESS == retval) {

        if (options->skip_if_larger) {
            // this is very rough approximation, but generally avoid losing more quality than is gained in file size.
            // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss.
            // but >50% savings are considered always worthwhile in order to allow low quality conversions to work at all
            const double quality = quality_percent/100.0;
            const double expected_reduced_size = pow(quality, 1.5);
            output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 0.5 : expected_reduced_size);
        }

        output_image.fast_compression = options->fast_compression;
        /* transfer ownership of ancillary chunks to the output image */
        output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL;
        retval = write_image(&output_image, NULL, outname, options, liq);
        if (TOO_LARGE_FILE == retval) {
            verbose_printf(liq, options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL);
        }
        if (SUCCESS == retval && output_image.metadata_size > 0) {
            verbose_printf(liq, options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000);
        }
    }

    if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) {
        // when outputting to stdout it'd be nasty to create 0-byte file
        // so if quality is too low, output 24-bit original
        pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options, liq);
        if (write_retval) {
            retval = write_retval;
        }
    }

    if (input_image) liq_image_destroy(input_image);
    rwpng_free_image24(&input_image_rwpng);
    rwpng_free_image8(&output_image);

    return retval;
}

/* Copies the quantized palette out of `result` into the PNG writer struct. */
static void set_palette(liq_result *result, png8_image *output_image)
{
    const liq_palette *palette = liq_get_palette(result);

    output_image->num_palette = palette->count;
    for(unsigned int i=0; i < palette->count; i++) {
        const liq_color px = palette->entries[i];
        output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a};
    }
}

/* True if `outname` can be opened for reading (i.e. it already exists). */
static bool file_exists(const char *outname)
{
    FILE *outfile = fopen(outname, "rb");
    if ((outfile ) != NULL) {
        fclose(outfile);
        return true;
    }
    return false;
}

/* build the output filename from the input name by inserting "-fs8" or
 * "-or8" before the ".png" extension (or by appending that plus ".png" if
 * there isn't any extension), then make sure it doesn't exist already */
static char *add_filename_extension(const char *filename, const char *newext)
{
    size_t x = strlen(filename);

    /* room for the original name, the new extension, ".png" and the NUL */
    char* outname = malloc(x+4+strlen(newext)+1);
    if (!outname) return NULL;

    strcpy(outname,
filename);
    if (x > 4 && (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0)) {
        /* replace the trailing .png/.PNG with the new suffix */
        strcpy(outname+x-4, newext);
    } else {
        strcpy(outname+x, newext);
    }

    return outname;
}

/* Returns a heap-allocated "<basename>.tmp" used for atomic replacement. */
static char *temp_filename(const char *basename)
{
    size_t x = strlen(basename);
    char *outname = malloc(x+1+4);
    if (!outname) return NULL;

    strcpy(outname, basename);
    strcpy(outname+x, ".tmp");

    return outname;
}

/* On Windows, switches stdin/stdout into binary mode; a no-op elsewhere.
 * NOTE(review): uses hard-coded fds 1/0 instead of fileno(fp) — confirm intent. */
static void set_binary_mode(FILE *fp)
{
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
    setmode(fp == stdout ? 1 : 0, O_BINARY);
#endif
}

/* Returns a pointer to the last path component (text after the final '/'). */
static const char *filename_part(const char *path)
{
    const char *outfilename = strrchr(path, '/');
    if (outfilename) {
        return outfilename+1;
    } else {
        return path;
    }
}

/* Renames `from` over `to`; returns true on success. */
static bool replace_file(const char *from, const char *to, const bool force)
{
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
    if (force) {
        // On Windows rename doesn't replace
        unlink(to);
    }
#endif
    return (0 == rename(from, to));
}

/* Writes either the 8-bit quantized image or (when output_image is NULL)
 * the 24-bit original to `outname` or stdout. Non-stdout writes go to a
 * ".tmp" file first and are renamed into place only on success. */
static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq)
{
    FILE *outfile;
    char *tempname = NULL;

    if (options->using_stdout) {
        set_binary_mode(stdout);
        outfile = stdout;

        if (output_image) {
            verbose_printf(liq, options, " writing %d-color image to stdout", output_image->num_palette);
        } else {
            verbose_printf(liq, options, " writing truecolor image to stdout");
        }
    } else {
        tempname = temp_filename(outname);
        if (!tempname) return OUT_OF_MEMORY_ERROR;

        if ((outfile = fopen(tempname, "wb")) == NULL) {
            fprintf(stderr, " error: cannot open '%s' for writing\n", tempname);
            free(tempname);
            return CANT_WRITE_ERROR;
        }

        if (output_image) {
            verbose_printf(liq, options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname));
        } else {
            verbose_printf(liq, options, " writing truecolor image as %s", filename_part(outname));
        }
    }

    pngquant_error retval;
    /* libpng is not reentrant across threads; serialize all encode calls */
    #pragma omp critical (libpng)
    {
        if (output_image) {
            retval = rwpng_write_image8(outfile, output_image);
        } else {
            retval = rwpng_write_image24(outfile, output_image24);
        }
    }

    if (!options->using_stdout) {
        fclose(outfile);

        if (SUCCESS == retval) {
            // Image has been written to a temporary file and then moved over destination.
            // This makes replacement atomic and avoids damaging destination file on write error.
            if (!replace_file(tempname, outname, options->force)) {
                retval = CANT_WRITE_ERROR;
            }
        }

        if (retval) {
            unlink(tempname);
        }
    }
    free(tempname);

    if (retval && retval != TOO_LARGE_FILE) {
        fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval);
    }

    return retval;
}

/* Decodes a PNG from `filename` (or stdin) into *input_image_p and wraps it
 * in a liq_image. Unless keep_input_pixels, pixel ownership is transferred
 * to libimagequant and the rwpng pointers are cleared. */
static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose)
{
    FILE *infile;

    if (using_stdin) {
        set_binary_mode(stdin);
        infile = stdin;
    } else if ((infile = fopen(filename, "rb")) == NULL) {
        fprintf(stderr, " error: cannot open %s for reading\n", filename);
        return READ_ERROR;
    }

    pngquant_error retval;
    /* libpng is not reentrant across threads; serialize all decode calls */
    #pragma omp critical (libpng)
    {
        retval = rwpng_read_image24(infile, input_image_p, strip, verbose);
    }

    if (!using_stdin) {
        fclose(infile);
    }

    if (retval) {
        fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? "from stdin" : filename_part(filename));
        return retval;
    }

    *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma);
    if (!*liq_image_p) {
        return OUT_OF_MEMORY_ERROR;
    }

    if (!keep_input_pixels) {
        if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) {
            return OUT_OF_MEMORY_ERROR;
        }
        /* libimagequant now owns these; prevent rwpng from double-freeing */
        input_image_p->row_pointers = NULL;
        input_image_p->rgba_data = NULL;
    }

    return SUCCESS;
}

/* Allocates the indexed output bitmap and row pointers sized to the input,
 * and copies gamma/color-transform settings into the writer struct. */
static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image)
{
    output_image->width = liq_image_get_width(input_image);
    output_image->height = liq_image_get_height(input_image);
    output_image->gamma = liq_get_output_gamma(result);
    output_image->output_color = output_color;

    /*
    ** Step 3.7 [GRR]: allocate memory for the entire indexed image
    */

    output_image->indexed_data = malloc((size_t)output_image->height * (size_t)output_image->width);
    output_image->row_pointers = malloc((size_t)output_image->height * sizeof(output_image->row_pointers[0]));

    if (!output_image->indexed_data || !output_image->row_pointers) {
        return OUT_OF_MEMORY_ERROR;
    }

    for(size_t row = 0; row < output_image->height; row++) {
        output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width;
    }

    const liq_palette *palette = liq_get_palette(result);
    // tRNS, etc.
    output_image->num_palette = palette->count;

    return SUCCESS;
}
bt.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - BT

  This benchmark is an OpenMP C version of the NPB BT code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial
  Fortran versions in "NPB 2.3-serial" developed by NAS.
  3.0 translation is performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

  Authors: R. Van der Wijngaart
           T. Harris
           M. Yarrow

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"
#include <nautilus/shell.h>

/* global variables */
#include "header.h"

/* Zeroing allocator shim for this kernel port.
 * NOTE(review): does not check the malloc result before memset, and `n`/`s`
 * are not parenthesized — only safe with simple (non-expression) arguments. */
#define calloc(n,s) ({ void *_p=malloc(n*s);memset(_p,0,n*s); _p;})

/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *class, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]);
static void binvcrhs(double llhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double llhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);
static int program_BT(char *__buf, void* __priv);
int program_BT_profile(char *_, void* __);

/* Nautilus shell command registration for "nas-bt". */
static struct shell_cmd_impl nas_bt_impl = {
    .cmd = "nas-bt",
    .help_str = "NAS parallel benchmark BT",
    .handler = program_BT_profile,
};
nk_register_shell_cmd(nas_bt_impl);

/* Shell entry point: runs the benchmark, wrapped in the kernel
 * instrumentation when NAUT_CONFIG_PROFILE is enabled. */
int program_BT_profile(char *_, void *__){
#ifdef NAUT_CONFIG_PROFILE
    nk_instrument_clear();
    nk_instrument_start();
#endif
    program_BT(_,__);
#ifdef NAUT_CONFIG_PROFILE
    nk_instrument_end();
    nk_instrument_query();
#endif
    return 0;
}

/* Bump allocator over a single 8GB arena.
 * NOTE(review): the arena is never freed and allocation failure only prints
 * a message before handing back a pointer into a NULL arena. */
static void * __m=0;
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
#define _malloc(n) ({ if (!__m) { __m = malloc(1UL<<33); if(!__m){printf("no __m\n"); }} void *__r = __m; unsigned long long __n = ALIGN(n, 16); __m+=__n; __r; })

/* Allocates and zeroes all global solver matrices (see header.h). */
static void arr_init_calloc(){
    us_ptr = calloc(sizeof(s_matrix_t), 1);
    vs_ptr = calloc(sizeof(s_matrix_t), 1);
    ws_ptr = calloc(sizeof(s_matrix_t), 1);
    qs_ptr = calloc(sizeof(s_matrix_t), 1);
    rho_i_ptr = calloc(sizeof(s_matrix_t), 1);
    square_ptr = calloc(sizeof(s_matrix_t), 1);
    forcing_ptr = calloc(sizeof(f_matrix_t), 1);
    u_ptr = calloc(sizeof(u_matrix_t), 1);
    rhs_ptr = calloc(sizeof(rhs_matrix_t), 1);
    lhs_ptr = calloc(sizeof(lhs_matrix_t), 1);
    fjac_ptr = calloc(sizeof(jac_matrix_t), 1);
    njac_ptr = calloc(sizeof(jac_matrix_t), 1);
}

/* Releases everything arr_init_calloc() allocated. */
static void free_arr(){
    free(us_ptr);
    free(vs_ptr);
    free(ws_ptr);
    free(qs_ptr);
    free(rho_i_ptr);
    free(square_ptr);
    free(forcing_ptr);
    free(u_ptr);
    free(rhs_ptr);
    free(lhs_ptr);
    free(fjac_ptr);
    free(njac_ptr);
}

/*--------------------------------------------------------------------
      program BT
c-------------------------------------------------------------------*/
static int program_BT(char *__buf, void*
__priv)
{
    int niter, step, n3;
    int nthreads = 1;
    double navg, mflops;
    double tmax;
    boolean verified;
    char class;
    //FILE *fp;

    //initialize array
    arr_init_calloc();

/*--------------------------------------------------------------------
c      Root node reads input file (if it exists) else takes
c      defaults from parameters
c-------------------------------------------------------------------*/
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
           " - BT Benchmark\n\n");

    /* fp = fopen("inputbt.data", "r"); */
    /* if (fp != NULL) { */
    /*   printf(" Reading from input file inputbt.data"); */
    /*   fscanf(fp, "%d", &niter); */
    /*   while (fgetc(fp) != '\n'); */
    /*   fscanf(fp, "%lg", &dt); */
    /*   while (fgetc(fp) != '\n'); */
    /*   fscanf(fp, "%d%d%d", */
    /*          &grid_points[0], &grid_points[1], &grid_points[2]); */
    /*   fclose(fp); */
    /* } else { */
    /*   printf(" No input file inputbt.data. Using compiled defaults\n"); */
    niter = NITER_DEFAULT;
    dt = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
    //  }

    printf(" Size: %3dx%3dx%3d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Iterations: %3d dt: %10.6f\n", niter, dt);

    if (grid_points[0] > IMAX || grid_points[1] > JMAX || grid_points[2] > KMAX) {
        printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
        printf(" Problem size too big for compiled array sizes\n");
        exit(1);
    }

    set_constants();
    initialize();
    lhsinit();
    exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
    adi();
    initialize();

    timer_clear(1);
    timer_start(1);

    for (step = 1; step <= niter; step++) {
        if (step%20 == 0 || step == 1) {
            printf(" Time step %4d\n", step);
        }
        adi();
    }

#pragma omp parallel
    {
#if defined(_OPENMP)
        /* NOTE(review): "omp master" applies only to the next statement, so
           the printf below runs on every thread, not only the master. */
#pragma omp master
        nthreads = omp_get_num_threads();
        printf("nthreads %d\n",nthreads);
#endif /* _OPENMP */
    } /* end parallel */

    timer_stop(1);
    tmax = timer_read(1);

    verify(niter, &class, &verified);

    /* standard NPB BT MFLOPS model based on grid volume and iterations */
    n3 = grid_points[0]*grid_points[1]*grid_points[2];
    navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
    if ( tmax != 0.0 ) {
        mflops = 1.0e-6*(double)niter*
            (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
    } else {
        mflops = 0.0;
    }

    c_print_results("BT", class, grid_points[0],
                    grid_points[1], grid_points[2], niter, nthreads,
                    tmax, mflops, " floating point",
                    verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6,
                    "(none)");

    free_arr();
    /* NOTE(review): declared int but falls off the end without a return value. */
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/* Adds the computed update (rhs) into the solution vector u. */
static void add(void) {

/*--------------------------------------------------------------------
c     addition of update to the vector u
c-------------------------------------------------------------------*/

    int i, j, k, m;

#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
        for (j = 1; j < grid_points[1]-1; j++) {
            for (k = 1; k < grid_points[2]-1; k++) {
                for (m = 0; m < 5; m++) {
                    u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];
                }
            }
        }
    }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* One ADI time step: RHS evaluation, x/y/z implicit solves, then update. */
static void adi(void) {
#pragma omp parallel
    compute_rhs();

#pragma omp parallel
    x_solve();

#pragma omp parallel
    y_solve();

#pragma omp parallel
    z_solve();

#pragma omp parallel
    add();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void error_norm(double rms[5]) {

/*--------------------------------------------------------------------
c     this function computes the norm of the difference between the
c     computed solution and the exact solution
c-------------------------------------------------------------------*/

    int i, j, k, m, d;
    double xi, eta, zeta, u_exact[5], add;

    for (m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }

    for (i =
0; i < grid_points[0]; i++) {
        xi = (double)i * dnxm1;
        for (j = 0; j < grid_points[1]; j++) {
            eta = (double)j * dnym1;
            for (k = 0; k < grid_points[2]; k++) {
                zeta = (double)k * dnzm1;
                exact_solution(xi, eta, zeta, u_exact);
                for (m = 0; m < 5; m++) {
                    add = u[i][j][k][m] - u_exact[m];
                    rms[m] = rms[m] + add*add;
                }
            }
        }
    }

    /* normalize by the interior extent of each direction, then take sqrt */
    for (m = 0; m < 5; m++) {
        for (d = 0; d <= 2; d++) {
            rms[m] = rms[m] / (double)(grid_points[d]-2);
        }
        rms[m] = sqrt(rms[m]);
    }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* Computes the RMS norm of the right-hand-side vector over interior points. */
static void rhs_norm(double rms[5]) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

    int i, j, k, d, m;
    double add;

    for (m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }

    for (i = 1; i < grid_points[0]-1; i++) {
        for (j = 1; j < grid_points[1]-1; j++) {
            for (k = 1; k < grid_points[2]-1; k++) {
                for (m = 0; m < 5; m++) {
                    add = rhs[i][j][k][m];
                    rms[m] = rms[m] + add*add;
                }
            }
        }
    }

    for (m = 0; m < 5; m++) {
        for (d = 0; d <= 2; d++) {
            rms[m] = rms[m] / (double)(grid_points[d]-2);
        }
        rms[m] = sqrt(rms[m]);
    }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void exact_rhs(void) {
#pragma omp parallel
{

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     compute the right hand side based on exact solution
c-------------------------------------------------------------------*/

    double dtemp[5], xi, eta, zeta, dtpp;
    int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c     initialize
c-------------------------------------------------------------------*/
#pragma omp for
    for (i = 0; i < grid_points[0]; i++) {
        for (j = 0; j < grid_points[1]; j++) {
            for (k = 0; k < grid_points[2]; k++) {
                for (m = 0; m < 5; m++) {
                    forcing[i][j][k][m] = 0.0;
                }
            }
        }
    }

/*--------------------------------------------------------------------
c     xi-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
    for (j = 1; j < grid_points[1]-1; j++) {
        eta = (double)j * dnym1;
        for (k = 1; k < grid_points[2]-1; k++) {
            zeta = (double)k * dnzm1;

            /* evaluate exact solution along the i-pencil into ue/buf/cuf/q */
            for (i = 0; i < grid_points[0]; i++) {
                xi = (double)i * dnxm1;

                exact_solution(xi, eta, zeta, dtemp);
                for (m = 0; m < 5; m++) {
                    ue[i][m] = dtemp[m];
                }

                dtpp = 1.0 / dtemp[0];

                for (m = 1; m <= 4; m++) {
                    buf[i][m] = dtpp * dtemp[m];
                }

                cuf[i] = buf[i][1] * buf[i][1];
                buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
                q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] + buf[i][3]*ue[i][3]);
            }

            /* central flux differences over the interior of the pencil */
            for (i = 1; i < grid_points[0]-1; i++) {
                im1 = i-1;
                ip1 = i+1;

                forcing[i][j][k][0] = forcing[i][j][k][0] -
                    tx2*(ue[ip1][1]-ue[im1][1])+
                    dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

                forcing[i][j][k][1] = forcing[i][j][k][1] -
                    tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
                           (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
                    xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
                    dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);

                forcing[i][j][k][2] = forcing[i][j][k][2] -
                    tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
                    xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
                    dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);

                forcing[i][j][k][3] = forcing[i][j][k][3] -
                    tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
                    xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
                    dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

                forcing[i][j][k][4] = forcing[i][j][k][4] -
                    tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
                         buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
                    0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
                    xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
                    xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
                    dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+
ue[im1][4]);
            }

/*--------------------------------------------------------------------
c     Fourth-order dissipation
c-------------------------------------------------------------------*/
            for (m = 0; m < 5; m++) {
                i = 1;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
                i = 2;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
                     4.0*ue[i+1][m] + ue[i+2][m]);
            }

            for (m = 0; m < 5; m++) {
                for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
                    forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
                        (ue[i-2][m] - 4.0*ue[i-1][m] +
                         6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
                }
            }

            for (m = 0; m < 5; m++) {
                i = grid_points[0]-3;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (ue[i-2][m] - 4.0*ue[i-1][m] +
                     6.0*ue[i][m] - 4.0*ue[i+1][m]);
                i = grid_points[0]-2;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
            }
        }
    }

/*--------------------------------------------------------------------
c     eta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;
        for (k = 1; k < grid_points[2]-1; k++) {
            zeta = (double)k * dnzm1;

            /* evaluate exact solution along the j-pencil into ue/buf/cuf/q */
            for (j = 0; j < grid_points[1]; j++) {
                eta = (double)j * dnym1;

                exact_solution(xi, eta, zeta, dtemp);
                for (m = 0; m < 5; m++) {
                    ue[j][m] = dtemp[m];
                }

                dtpp = 1.0/dtemp[0];

                for (m = 1; m <= 4; m++) {
                    buf[j][m] = dtpp * dtemp[m];
                }

                cuf[j] = buf[j][2] * buf[j][2];
                buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
                q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] + buf[j][3]*ue[j][3]);
            }

            for (j = 1; j < grid_points[1]-1; j++) {
                jm1 = j-1;
                jp1 = j+1;

                forcing[i][j][k][0] = forcing[i][j][k][0] -
                    ty2*( ue[jp1][2]-ue[jm1][2] )+
                    dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

                forcing[i][j][k][1] = forcing[i][j][k][1] -
                    ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
                    yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
                    dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

                forcing[i][j][k][2] = forcing[i][j][k][2] -
                    ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
                         (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
                    yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
                    dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

                forcing[i][j][k][3] = forcing[i][j][k][3] -
                    ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
                    yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
                    dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

                forcing[i][j][k][4] = forcing[i][j][k][4] -
                    ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
                         buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
                    0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+ buf[jm1][0])+
                    yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
                    yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
                    dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
            }

/*--------------------------------------------------------------------
c     Fourth-order dissipation
c-------------------------------------------------------------------*/
            for (m = 0; m < 5; m++) {
                j = 1;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
                j = 2;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
                     4.0*ue[j+1][m] + ue[j+2][m]);
            }

            for (m = 0; m < 5; m++) {
                for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
                    forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
                        (ue[j-2][m] - 4.0*ue[j-1][m] +
                         6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
                }
            }

            for (m = 0; m < 5; m++) {
                j = grid_points[1]-3;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (ue[j-2][m] - 4.0*ue[j-1][m] +
                     6.0*ue[j][m] - 4.0*ue[j+1][m]);
                j = grid_points[1]-2;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
            }
        }
    }

/*--------------------------------------------------------------------
c     zeta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;
        for (j = 1; j < grid_points[1]-1; j++) {
            eta = (double)j * dnym1;

            /* evaluate exact solution along the k-pencil into ue/buf/cuf/q */
            for (k = 0; k < grid_points[2]; k++) {
                zeta = (double)k * dnzm1;

                exact_solution(xi, eta, zeta, dtemp);
                for (m = 0; m < 5; m++) {
                    ue[k][m] = dtemp[m];
                }

                dtpp = 1.0/dtemp[0];

                for (m = 1; m <= 4; m++) {
                    buf[k][m] = dtpp * dtemp[m];
                }

                cuf[k] = buf[k][3] * buf[k][3];
                buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
                q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] + buf[k][3]*ue[k][3]);
            }

            for (k = 1; k < grid_points[2]-1; k++) {
                km1 = k-1;
                kp1 = k+1;

                forcing[i][j][k][0] = forcing[i][j][k][0] -
                    tz2*( ue[kp1][3]-ue[km1][3] )+
                    dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

                forcing[i][j][k][1] = forcing[i][j][k][1] -
                    tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
                    zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
                    dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

                forcing[i][j][k][2] = forcing[i][j][k][2] -
                    tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
                    zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
                    dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

                forcing[i][j][k][3] = forcing[i][j][k][3] -
                    tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
                           (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
                    zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
                    dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

                forcing[i][j][k][4] = forcing[i][j][k][4] -
                    tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
                           buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
                    0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0] +buf[km1][0])+
                    zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
                    zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
                    dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
            }

/*--------------------------------------------------------------------
c     Fourth-order dissipation
c-------------------------------------------------------------------*/
            for (m = 0; m < 5; m++) {
                k = 1;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
                k = 2;
                forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
                    (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
                     4.0*ue[k+1][m] + ue[k+2][m]);
            }

            for (m =
0; m < 5; m++) { for (k = 1*3; k <= grid_points[2]-3*1-1; k++) { forcing[i][j][k][m] = forcing[i][j][k][m] - dssp* (ue[k-2][m] - 4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]); } } for (m = 0; m < 5; m++) { k = grid_points[2]-3; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k-2][m] - 4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m]); k = grid_points[2]-2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]); } } } /*-------------------------------------------------------------------- c now change the sign of the forcing function, c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact_solution(double xi, double eta, double zeta, double dtemp[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c this function returns the exact solution at point xi, eta, zeta c-------------------------------------------------------------------*/ int m; for (m = 0; m < 5; m++) { dtemp[m] = ce[m][0] + xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]))) + eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11])))+ zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + zeta*ce[m][12]))); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void initialize(void) { #pragma omp parallel { /*-------------------------------------------------------------------- 
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/

  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < IMAX; i++) {
    for (j = 0; j < IMAX; j++) {
      for (k = 0; k < IMAX; k++) {
        for (m = 0; m < 5; m++) {
          u[i][j][k][m] = 1.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;
        /* exact solution on the two opposite faces in each direction */
        for (ix = 0; ix < 2; ix++) {
          exact_solution((double)ix, eta, zeta, &(Pface[ix][0][0]));
        }
        for (iy = 0; iy < 2; iy++) {
          exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]);
        }
        for (iz = 0; iz < 2; iz++) {
          exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]);
        }
        /* tri-linear transfinite interpolation of the face values */
        for (m = 0; m < 5; m++) {
          Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m];
          Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m];
          u[i][j][k][m] = Pxi + Peta + Pzeta -
            Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
            Pxi*Peta*Pzeta;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c now store the exact values on the boundaries
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
  i = 0;
  xi = 0.0;
#pragma omp for nowait
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
  i = grid_points[0]-1;
  xi = 1.0;
#pragma omp for
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
  j = 0;
  eta = 0.0;
#pragma omp for nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
  j = grid_points[1]-1;
  eta = 1.0;
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
  k = 0;
  zeta = 0.0;
#pragma omp for nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
  k = grid_points[2]-1;
  zeta = 1.0;
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
}  /* end omp parallel */
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {

#pragma omp parallel
{
  int i, j, k, m, n;

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c zero the whole left hand side for starters
c (slots 0/1/2 are the sub-/main-/super-diagonal 5x5 blocks)
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          for (n = 0; n < 5; n++) {
            lhs[i][j][k][0][m][n] = 0.0;
            lhs[i][j][k][1][m][n] = 0.0;
            lhs[i][j][k][2][m][n] = 0.0;
          }
        }
      }
    }
  }

/*--------------------------------------------------------------------
c next, set all diagonal values to 1. This is overkill, but convenient
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          lhs[i][j][k][1][m][m] = 1.0;
        }
      }
    }
  }
}  /* end omp parallel */
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side in the xi-direction
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c determine a (labeled f) and n jacobians
c-------------------------------------------------------------------*/
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (i = 0; i < grid_points[0]; i++) {
        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;
/*--------------------------------------------------------------------
c flux jacobian (convective part), row by row
c-------------------------------------------------------------------*/
        fjac[i][j][k][0][0] = 0.0;
        fjac[i][j][k][0][1] = 1.0;
        fjac[i][j][k][0][2] = 0.0;
        fjac[i][j][k][0][3] = 0.0;
        fjac[i][j][k][0][4] = 0.0;

        fjac[i][j][k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1])
          + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
                         + u[i][j][k][2] * u[i][j][k][2]
                         + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][1][1] = ( 2.0 - c2 ) *
          ( u[i][j][k][1] / u[i][j][k][0] );
        fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
        fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
        fjac[i][j][k][1][4] = c2;

        fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
        fjac[i][j][k][2][1] =
u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][2][3] = 0.0;
        fjac[i][j][k][2][4] = 0.0;

        fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][2] = 0.0;
        fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][3][4] = 0.0;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * ( u[i][j][k][4] * tmp1 ) )
          * ( u[i][j][k][1] * tmp1 );
        fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
          - 0.50 * c2 * ( 3.0*u[i][j][k][1]*u[i][j][k][1]
                          + u[i][j][k][2]*u[i][j][k][2]
                          + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] ) * tmp2;
        fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] ) * tmp2;
        fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );

        /* viscous jacobian */
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
        njac[i][j][k][1][2] = 0.0;
        njac[i][j][k][1][3] = 0.0;
        njac[i][j][k][1][4] = 0.0;

        njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] = 0.0;
        njac[i][j][k][2][2] = c3c4 * tmp1;
        njac[i][j][k][2][3] = 0.0;
        njac[i][j][k][2][4] = 0.0;

        njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] = 0.0;
        njac[i][j][k][3][2] = 0.0;
        njac[i][j][k][3][3] = c3c4 * tmp1;
        njac[i][j][k][3][4] = 0.0;

        njac[i][j][k][4][0] = - ( con43 * c3c4
                                  - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = ( con43 * c3c4
                                - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in x direction
c (AA/BB/CC are the sub-/main-/super-diagonal 5x5 blocks of the
c block tridiagonal system; diagonal entries also get the dx terms)
c-------------------------------------------------------------------*/
      for (i = 1; i < grid_points[0]-1; i++) {
        tmp1 = dt * tx1;
        tmp2 = dt * tx2;

        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]
          - tmp1 * njac[i-1][j][k][0][0]
          - tmp1 * dx1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]
          - tmp1 * njac[i-1][j][k][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]
          - tmp1 * njac[i-1][j][k][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]
          - tmp1 * njac[i-1][j][k][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]
          - tmp1 * njac[i-1][j][k][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]
          - tmp1 * njac[i-1][j][k][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]
          - tmp1 * njac[i-1][j][k][1][1]
          - tmp1 * dx2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]
          - tmp1 * njac[i-1][j][k][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]
          - tmp1 * njac[i-1][j][k][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]
          - tmp1 * njac[i-1][j][k][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]
          - tmp1 * njac[i-1][j][k][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]
          - tmp1 * njac[i-1][j][k][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]
          - tmp1 * njac[i-1][j][k][2][2]
          - tmp1 * dx3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]
          - tmp1 * njac[i-1][j][k][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]
          - tmp1 * njac[i-1][j][k][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]
          - tmp1 * njac[i-1][j][k][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]
          - tmp1 * njac[i-1][j][k][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]
          - tmp1 * njac[i-1][j][k][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]
          - tmp1 * njac[i-1][j][k][3][3]
          - tmp1 * dx4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]
          - tmp1 * njac[i-1][j][k][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]
          - tmp1 * njac[i-1][j][k][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]
          - tmp1 * njac[i-1][j][k][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]
          - tmp1 * njac[i-1][j][k][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]
          - tmp1 * njac[i-1][j][k][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]
          - tmp1 * njac[i-1][j][k][4][4]
          - tmp1 * dx5;

        lhs[i][j][k][BB][0][0] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][0][0]
          + tmp1 * 2.0 * dx1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][1][1]
          + tmp1 * 2.0 * dx2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][2][2]
          + tmp1 * 2.0 * dx3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][3][3]
          + tmp1 * 2.0 * dx4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][4][4]
          + tmp1 * 2.0 * dx5;

        lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]
          - tmp1 * njac[i+1][j][k][0][0]
          - tmp1 * dx1;
        lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]
          - tmp1 * njac[i+1][j][k][0][1];
        lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]
          - tmp1 * njac[i+1][j][k][0][2];
        lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]
          - tmp1 * njac[i+1][j][k][0][3];
        lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]
          - tmp1 * njac[i+1][j][k][0][4];

        lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]
          - tmp1 * njac[i+1][j][k][1][0];
        lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]
          - tmp1 * njac[i+1][j][k][1][1]
          - tmp1 * dx2;
        lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2]
          - tmp1 * njac[i+1][j][k][1][2];
        lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]
          - tmp1 * njac[i+1][j][k][1][3];
        lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]
          - tmp1 * njac[i+1][j][k][1][4];

        lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]
          - tmp1 * njac[i+1][j][k][2][0];
        lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]
          - tmp1 * njac[i+1][j][k][2][1];
        lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]
          - tmp1 * njac[i+1][j][k][2][2]
          - tmp1 * dx3;
        lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]
          - tmp1 * njac[i+1][j][k][2][3];
        lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]
          - tmp1 * njac[i+1][j][k][2][4];

        lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]
          - tmp1 * njac[i+1][j][k][3][0];
        lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]
          - tmp1 * njac[i+1][j][k][3][1];
        lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]
          - tmp1 * njac[i+1][j][k][3][2];
        lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]
          - tmp1 * njac[i+1][j][k][3][3]
          - tmp1 * dx4;
        lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]
          - tmp1 * njac[i+1][j][k][3][4];

        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]
          - tmp1 *
njac[i+1][j][k][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]
          - tmp1 * njac[i+1][j][k][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]
          - tmp1 * njac[i+1][j][k][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]
          - tmp1 * njac[i+1][j][k][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]
          - tmp1 * njac[i+1][j][k][4][4]
          - tmp1 * dx5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c (note: j runs over the full range here, since j-1/j+1 neighbours
c are needed by the LHS-formation loop below)
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        /* flux jacobian (convective part) */
        fjac[i][j][k][0][0] = 0.0;
        fjac[i][j][k][0][1] = 0.0;
        fjac[i][j][k][0][2] = 1.0;
        fjac[i][j][k][0][3] = 0.0;
        fjac[i][j][k][0][4] = 0.0;

        fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
        fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][1][3] = 0.0;
        fjac[i][j][k][1][4] = 0.0;

        fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
          + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
                            + u[i][j][k][2] * u[i][j][k][2]
                            + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
        fjac[i][j][k][2][2] = ( 2.0 - c2 ) * u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
        fjac[i][j][k][2][4] = c2;

        fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][3][1] = 0.0;
        fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][3][4] = 0.0;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * u[i][j][k][4] * tmp1 )
          * u[i][j][k][2] * tmp1;
        fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2] * tmp2;
        fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
          - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1]
                            + 3.0 * u[i][j][k][2]*u[i][j][k][2]
                            + u[i][j][k][3]*u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;

        /* viscous jacobian */
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] = c3c4 * tmp1;
        njac[i][j][k][1][2] = 0.0;
        njac[i][j][k][1][3] = 0.0;
        njac[i][j][k][1][4] = 0.0;

        njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] = 0.0;
        njac[i][j][k][2][2] = con43 * c3c4 * tmp1;
        njac[i][j][k][2][3] = 0.0;
        njac[i][j][k][2][4] = 0.0;

        njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] = 0.0;
        njac[i][j][k][3][2] = 0.0;
        njac[i][j][k][3][3] = c3c4 * tmp1;
        njac[i][j][k][3][4] = 0.0;

        njac[i][j][k][4][0] = - ( c3c4
                                  - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( con43 * c3c4
              - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( con43 * c3c4
                                - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }
    }
  }
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in y direction
c (AA/BB/CC sub-/main-/super-diagonal blocks; diagonal entries also
c carry the dy damping terms)
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        tmp1 = dt * ty1;
        tmp2 = dt * ty2;

        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]
          - tmp1 * njac[i][j-1][k][0][0]
          - tmp1 * dy1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]
          - tmp1 * njac[i][j-1][k][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]
          - tmp1 * njac[i][j-1][k][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]
          - tmp1 * njac[i][j-1][k][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]
          - tmp1 * njac[i][j-1][k][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]
          - tmp1 * njac[i][j-1][k][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]
          - tmp1 * njac[i][j-1][k][1][1]
          - tmp1 * dy2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]
          - tmp1 * njac[i][j-1][k][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]
          - tmp1 * njac[i][j-1][k][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]
          - tmp1 * njac[i][j-1][k][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]
          - tmp1 * njac[i][j-1][k][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]
          - tmp1 * njac[i][j-1][k][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]
          - tmp1 * njac[i][j-1][k][2][2]
          - tmp1 * dy3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]
          - tmp1 * njac[i][j-1][k][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]
          - tmp1 * njac[i][j-1][k][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]
          - tmp1 * njac[i][j-1][k][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]
          - tmp1 * njac[i][j-1][k][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]
          - tmp1 * njac[i][j-1][k][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]
          - tmp1 * njac[i][j-1][k][3][3]
          - tmp1 * dy4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]
          - tmp1 * njac[i][j-1][k][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]
          - tmp1 * njac[i][j-1][k][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]
          - tmp1 * njac[i][j-1][k][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]
          - tmp1 * njac[i][j-1][k][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]
          - tmp1 * njac[i][j-1][k][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]
          - tmp1 * njac[i][j-1][k][4][4]
          - tmp1 * dy5;

        lhs[i][j][k][BB][0][0] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][0][0]
          + tmp1 * 2.0 * dy1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][1][1]
          + tmp1 * 2.0 * dy2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][2][2]
          + tmp1 * 2.0 * dy3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][3][3]
          + tmp1 * 2.0 * dy4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][4][4]
          + tmp1 * 2.0 * dy5;

        lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]
          - tmp1 * njac[i][j+1][k][0][0]
          - tmp1 * dy1;
        lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]
          - tmp1 * njac[i][j+1][k][0][1];
        lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]
          - tmp1 * njac[i][j+1][k][0][2];
        lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]
          - tmp1 * njac[i][j+1][k][0][3];
        lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]
          - tmp1 * njac[i][j+1][k][0][4];

        lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]
          - tmp1 * njac[i][j+1][k][1][0];
        lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]
          - tmp1 * njac[i][j+1][k][1][1]
          - tmp1 * dy2;
        lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]
          - tmp1 * njac[i][j+1][k][1][2];
        lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]
          - tmp1 * njac[i][j+1][k][1][3];
        lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]
          - tmp1 * njac[i][j+1][k][1][4];

        lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]
          - tmp1 * njac[i][j+1][k][2][0];
        lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]
          - tmp1 * njac[i][j+1][k][2][1];
        lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]
          - tmp1 * njac[i][j+1][k][2][2]
          - tmp1 * dy3;
        lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]
          - tmp1 * njac[i][j+1][k][2][3];
        lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]
          - tmp1 * njac[i][j+1][k][2][4];

        lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]
          - tmp1 * njac[i][j+1][k][3][0];
        lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]
          - tmp1 * njac[i][j+1][k][3][1];
        lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]
          - tmp1 * njac[i][j+1][k][3][2];
        lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]
          - tmp1 * njac[i][j+1][k][3][3]
          - tmp1 * dy4;
        lhs[i][j][k][CC][3][4] = tmp2 *
fjac[i][j+1][k][3][4] - tmp1 * njac[i][j+1][k][3][4];

        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]
          - tmp1 * njac[i][j+1][k][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]
          - tmp1 * njac[i][j+1][k][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]
          - tmp1 * njac[i][j+1][k][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]
          - tmp1 * njac[i][j+1][k][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]
          - tmp1 * njac[i][j+1][k][4][4]
          - tmp1 * dy5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c (note: k runs over the full range, since k-1/k+1 neighbours are
c needed by the LHS-formation loop below)
c---------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        /* flux jacobian (convective part) */
        fjac[i][j][k][0][0] = 0.0;
        fjac[i][j][k][0][1] = 0.0;
        fjac[i][j][k][0][2] = 0.0;
        fjac[i][j][k][0][3] = 1.0;
        fjac[i][j][k][0][4] = 0.0;

        fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][1][2] = 0.0;
        fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][1][4] = 0.0;

        fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][2][1] = 0.0;
        fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][4] = 0.0;

        fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
          + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
                            + u[i][j][k][2] * u[i][j][k][2]
                            + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
        fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
        fjac[i][j][k][3][3] = ( 2.0 - c2 ) * u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][4] = c2;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * ( u[i][j][k][4] * tmp1 ) )
          * ( u[i][j][k][3] * tmp1 );
        fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
          - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1]
                            + u[i][j][k][2]*u[i][j][k][2]
                            + 3.0*u[i][j][k][3]*u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;

        /* viscous jacobian */
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] = c3c4 * tmp1;
        njac[i][j][k][1][2] = 0.0;
        njac[i][j][k][1][3] = 0.0;
        njac[i][j][k][1][4] = 0.0;

        njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] = 0.0;
        njac[i][j][k][2][2] = c3c4 * tmp1;
        njac[i][j][k][2][3] = 0.0;
        njac[i][j][k][2][4] = 0.0;

        njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] = 0.0;
        njac[i][j][k][3][2] = 0.0;
        /* NOTE(review): written "c3 * c4" here but "c3c4" in the
           analogous lhsx/lhsy entries — presumably c3c4 == c3*c4 so the
           value is the same; confirm against the constants header. */
        njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;
        njac[i][j][k][3][4] = 0.0;

        njac[i][j][k][4][0] = - ( c3c4
                                  - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( con43 * c3c4
              - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] =
( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 )* tmp1;
      }
    }
  }

/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in z direction
c (AA/BB/CC sub-/main-/super-diagonal blocks; diagonal entries also
c carry the dz damping terms)
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        tmp1 = dt * tz1;
        tmp2 = dt * tz2;

        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]
          - tmp1 * njac[i][j][k-1][0][0]
          - tmp1 * dz1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]
          - tmp1 * njac[i][j][k-1][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]
          - tmp1 * njac[i][j][k-1][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]
          - tmp1 * njac[i][j][k-1][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]
          - tmp1 * njac[i][j][k-1][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]
          - tmp1 * njac[i][j][k-1][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]
          - tmp1 * njac[i][j][k-1][1][1]
          - tmp1 * dz2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]
          - tmp1 * njac[i][j][k-1][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]
          - tmp1 * njac[i][j][k-1][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]
          - tmp1 * njac[i][j][k-1][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]
          - tmp1 * njac[i][j][k-1][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]
          - tmp1 * njac[i][j][k-1][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]
          - tmp1 * njac[i][j][k-1][2][2]
          - tmp1 * dz3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]
          - tmp1 * njac[i][j][k-1][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]
          - tmp1 * njac[i][j][k-1][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]
          - tmp1 * njac[i][j][k-1][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]
          - tmp1 * njac[i][j][k-1][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]
          - tmp1 * njac[i][j][k-1][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]
          - tmp1 * njac[i][j][k-1][3][3]
          - tmp1 * dz4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]
          - tmp1 * njac[i][j][k-1][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]
          - tmp1 * njac[i][j][k-1][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]
          - tmp1 * njac[i][j][k-1][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]
          - tmp1 * njac[i][j][k-1][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]
          - tmp1 * njac[i][j][k-1][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]
          - tmp1 * njac[i][j][k-1][4][4]
          - tmp1 * dz5;

        lhs[i][j][k][BB][0][0] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][0][0]
          + tmp1 * 2.0 * dz1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][1][1]
          + tmp1 * 2.0 * dz2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][2][2]
          + tmp1 * 2.0 * dz3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][3][3]
          + tmp1 * 2.0 * dz4;
        lhs[i][j][k][BB][3][4] =
          tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][4][4]
          + tmp1 * 2.0 * dz5;

        lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]
          - tmp1 * njac[i][j][k+1][0][0]
          - tmp1 * dz1;
        lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]
          - tmp1 * njac[i][j][k+1][0][1];
        lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]
          - tmp1 * njac[i][j][k+1][0][2];
        lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]
          - tmp1 * njac[i][j][k+1][0][3];
        lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]
          - tmp1 * njac[i][j][k+1][0][4];

        lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]
          - tmp1 * njac[i][j][k+1][1][0];
        lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]
          - tmp1 * njac[i][j][k+1][1][1]
          - tmp1 * dz2;
        lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]
          - tmp1 * njac[i][j][k+1][1][2];
        lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]
          - tmp1 * njac[i][j][k+1][1][3];
        lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]
          - tmp1 * njac[i][j][k+1][1][4];

        lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]
          - tmp1 * njac[i][j][k+1][2][0];
        lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]
          - tmp1 * njac[i][j][k+1][2][1];
        lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]
          - tmp1 * njac[i][j][k+1][2][2]
          - tmp1 * dz3;
        lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]
          - tmp1 * njac[i][j][k+1][2][3];
        lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]
          - tmp1 * njac[i][j][k+1][2][4];

        lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]
          - tmp1 * njac[i][j][k+1][3][0];
        lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]
          - tmp1 * njac[i][j][k+1][3][1];
        lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]
          - tmp1 * njac[i][j][k+1][3][2];
        lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3] -
tmp1 * njac[i][j][k+1][3][3] - tmp1 * dz4; lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4] - tmp1 * njac[i][j][k+1][3][4]; lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0] - tmp1 * njac[i][j][k+1][4][0]; lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1] - tmp1 * njac[i][j][k+1][4][1]; lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2] - tmp1 * njac[i][j][k+1][4][2]; lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3] - tmp1 * njac[i][j][k+1][4][3]; lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4] - tmp1 * njac[i][j][k+1][4][4] - tmp1 * dz5; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void compute_rhs(void) { int i, j, k, m; double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; /*-------------------------------------------------------------------- c compute the reciprocal of density, and the kinetic energy, c and the speed of sound. c-------------------------------------------------------------------*/ #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { rho_inv = 1.0/u[i][j][k][0]; rho_i[i][j][k] = rho_inv; us[i][j][k] = u[i][j][k][1] * rho_inv; vs[i][j][k] = u[i][j][k][2] * rho_inv; ws[i][j][k] = u[i][j][k][3] * rho_inv; square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * rho_inv; qs[i][j][k] = square[i][j][k] * rho_inv; } } } /*-------------------------------------------------------------------- c copy the exact forcing term to the right hand side; because c this forcing term is known, we can store it on the whole grid c including the boundary c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { 
rhs[i][j][k][m] = forcing[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c compute xi-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { uijk = us[i][j][k]; up1 = us[i+1][j][k]; um1 = us[i-1][j][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + u[i-1][j][k][0]) - tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]); rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + u[i-1][j][k][1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[i+1][j][k][1]*up1 - u[i-1][j][k][1]*um1 + (u[i+1][j][k][4]- square[i+1][j][k]- u[i-1][j][k][4]+ square[i-1][j][k])* c2); rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i+1][j][k][2] - 2.0*u[i][j][k][2] + u[i-1][j][k][2]) + xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] + vs[i-1][j][k]) - tx2 * (u[i+1][j][k][2]*up1 - u[i-1][j][k][2]*um1); rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i+1][j][k][3] - 2.0*u[i][j][k][3] + u[i-1][j][k][3]) + xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] + ws[i-1][j][k]) - tx2 * (u[i+1][j][k][3]*up1 - u[i-1][j][k][3]*um1); rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i+1][j][k][4] - 2.0*u[i][j][k][4] + u[i-1][j][k][4]) + xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] + qs[i-1][j][k]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i-1][j][k][4]*rho_i[i-1][j][k]) - tx2 * ( (c1*u[i+1][j][k][4] - c2*square[i+1][j][k])*up1 - (c1*u[i-1][j][k][4] - c2*square[i-1][j][k])*um1 ); } } } /*-------------------------------------------------------------------- c add fourth order xi-direction dissipation c-------------------------------------------------------------------*/ i = 1; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < 
grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } i = 2; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } #pragma omp for nowait for (i = 3; i < grid_points[0]-3; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m] ); } } } } i = grid_points[0]-3; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] ); } } } i = grid_points[0]-2; #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] + 5.0*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute eta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { vijk = vs[i][j][k]; vp1 = vs[i][j+1][k]; vm1 = vs[i][j-1][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + u[i][j-1][k][0]) - ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]); rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + u[i][j-1][k][1]) + yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + us[i][j-1][k]) - ty2 * 
(u[i][j+1][k][1]*vp1 - u[i][j-1][k][1]*vm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + u[i][j-1][k][2]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[i][j+1][k][2]*vp1 - u[i][j-1][k][2]*vm1 + (u[i][j+1][k][4] - square[i][j+1][k] - u[i][j-1][k][4] + square[i][j-1][k]) *c2); rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + u[i][j-1][k][3]) + yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + ws[i][j-1][k]) - ty2 * (u[i][j+1][k][3]*vp1 - u[i][j-1][k][3]*vm1); rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + u[i][j-1][k][4]) + yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + qs[i][j-1][k]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j-1][k][4]*rho_i[i][j-1][k]) - ty2 * ((c1*u[i][j+1][k][4] - c2*square[i][j+1][k]) * vp1 - (c1*u[i][j-1][k][4] - c2*square[i][j-1][k]) * vm1); } } } /*-------------------------------------------------------------------- c add fourth order eta-direction dissipation c-------------------------------------------------------------------*/ j = 1; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } j = 2; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 3; j < grid_points[1]-3; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + 
u[i][j+2][k][m] ); } } } } j = grid_points[1]-3; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] ); } } } j = grid_points[1]-2; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] + 5.*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute zeta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { wijk = ws[i][j][k]; wp1 = ws[i][j][k+1]; wm1 = ws[i][j][k-1]; rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + u[i][j][k-1][0]) - tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]); rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + u[i][j][k-1][1]) + zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + us[i][j][k-1]) - tz2 * (u[i][j][k+1][1]*wp1 - u[i][j][k-1][1]*wm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + u[i][j][k-1][2]) + zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + vs[i][j][k-1]) - tz2 * (u[i][j][k+1][2]*wp1 - u[i][j][k-1][2]*wm1); rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + u[i][j][k-1][3]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[i][j][k+1][3]*wp1 - u[i][j][k-1][3]*wm1 + (u[i][j][k+1][4] - square[i][j][k+1] - u[i][j][k-1][4] + square[i][j][k-1]) *c2); rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + u[i][j][k-1][4]) + zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + qs[i][j][k-1]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + 
wm1*wm1) + zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j][k-1][4]*rho_i[i][j][k-1]) - tz2 * ( (c1*u[i][j][k+1][4] - c2*square[i][j][k+1])*wp1 - (c1*u[i][j][k-1][4] - c2*square[i][j][k-1])*wm1); } } } /*-------------------------------------------------------------------- c add fourth order zeta-direction dissipation c-------------------------------------------------------------------*/ k = 1; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } k = 2; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 3; k < grid_points[2]-3; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m] ); } } } } k = grid_points[2]-3; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] ); } } } k = grid_points[2]-2; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 5.0*u[i][j][k][m] ); } } } #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { for (i = 1; i < grid_points[0]-1; i++) { rhs[i][j][k][m] 
= rhs[i][j][k][m] * dt; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void set_constants(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 0.5; ce[0][7] = 0.02; ce[0][8] = 0.01; ce[0][9] = 0.03; ce[0][10] = 0.5; ce[0][11] = 0.4; ce[0][12] = 0.3; ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] = 2.0; ce[1][6] = 3.0; ce[1][7] = 0.01; ce[1][8] = 0.03; ce[1][9] = 0.02; ce[1][10] = 0.4; ce[1][11] = 0.3; ce[1][12] = 0.5; ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0; ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 0.04; ce[2][8] = 0.03; ce[2][9] = 0.05; ce[2][10] = 0.3; ce[2][11] = 0.5; ce[2][12] = 0.4; ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0; ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 0.03; ce[3][8] = 0.05; ce[3][9] = 0.04; ce[3][10] = 0.2; ce[3][11] = 0.1; ce[3][12] = 0.3; ce[4][0] = 5.0; ce[4][1] = 4.0; ce[4][2] = 3.0; ce[4][3] = 2.0; ce[4][4] = 0.1; ce[4][5] = 0.4; ce[4][6] = 0.3; ce[4][7] = 0.05; ce[4][8] = 0.04; ce[4][9] = 0.03; ce[4][10] = 0.1; ce[4][11] = 0.3; ce[4][12] = 0.2; c1 = 1.4; c2 = 0.4; c3 = 0.1; c4 = 1.0; c5 = 1.4; dnxm1 = 1.0 / (double)(grid_points[0]-1); dnym1 = 1.0 / (double)(grid_points[1]-1); dnzm1 = 1.0 / (double)(grid_points[2]-1); c1c2 = c1 * c2; c1c5 = c1 * c5; c3c4 = c3 * c4; c1345 = c1c5 * c3c4; conz1 = (1.0-c1c5); tx1 = 1.0 / (dnxm1 * dnxm1); tx2 = 1.0 / (2.0 * dnxm1); tx3 = 1.0 / dnxm1; ty1 = 1.0 / (dnym1 * dnym1); ty2 = 1.0 / (2.0 * dnym1); ty3 = 1.0 / dnym1; tz1 = 1.0 / (dnzm1 * dnzm1); tz2 = 1.0 / (2.0 * dnzm1); tz3 = 1.0 / dnzm1; dx1 = 0.75; dx2 = 0.75; dx3 = 0.75; dx4 = 0.75; dx5 = 0.75; dy1 = 0.75; dy2 = 0.75; dy3 
= 0.75; dy4 = 0.75; dy5 = 0.75; dz1 = 1.0; dz2 = 1.0; dz3 = 1.0; dz4 = 1.0; dz5 = 1.0; dxmax = max(dx3, dx4); dymax = max(dy2, dy4); dzmax = max(dz2, dz3); dssp = 0.25 * max(dx1, max(dy1, dz1) ); c4dssp = 4.0 * dssp; c5dssp = 5.0 * dssp; dttx1 = dt*tx1; dttx2 = dt*tx2; dtty1 = dt*ty1; dtty2 = dt*ty2; dttz1 = dt*tz1; dttz2 = dt*tz2; c2dttx1 = 2.0*dttx1; c2dtty1 = 2.0*dtty1; c2dttz1 = 2.0*dttz1; dtdssp = dt*dssp; comz1 = dtdssp; comz4 = 4.0*dtdssp; comz5 = 5.0*dtdssp; comz6 = 6.0*dtdssp; c3c4tx3 = c3c4*tx3; c3c4ty3 = c3c4*ty3; c3c4tz3 = c3c4*tz3; dx1tx1 = dx1*tx1; dx2tx1 = dx2*tx1; dx3tx1 = dx3*tx1; dx4tx1 = dx4*tx1; dx5tx1 = dx5*tx1; dy1ty1 = dy1*ty1; dy2ty1 = dy2*ty1; dy3ty1 = dy3*ty1; dy4ty1 = dy4*ty1; dy5ty1 = dy5*ty1; dz1tz1 = dz1*tz1; dz2tz1 = dz2*tz1; dz3tz1 = dz3*tz1; dz4tz1 = dz4*tz1; dz5tz1 = dz5*tz1; c2iv = 2.5; con43 = 4.0/3.0; con16 = 1.0/6.0; xxcon1 = c3c4tx3*con43*tx3; xxcon2 = c3c4tx3*tx3; xxcon3 = c3c4tx3*conz1*tx3; xxcon4 = c3c4tx3*con16*tx3; xxcon5 = c3c4tx3*c1c5*tx3; yycon1 = c3c4ty3*con43*ty3; yycon2 = c3c4ty3*ty3; yycon3 = c3c4ty3*conz1*ty3; yycon4 = c3c4ty3*con16*ty3; yycon5 = c3c4ty3*c1c5*ty3; zzcon1 = c3c4tz3*con43*tz3; zzcon2 = c3c4tz3*tz3; zzcon3 = c3c4tz3*conz1*tz3; zzcon4 = c3c4tz3*con16*tz3; zzcon5 = c3c4tz3*c1c5*tz3; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void verify(int no_time_steps, char *class, boolean *verified) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c verification routine c-------------------------------------------------------------------*/ double xcrref[5],xceref[5],xcrdif[5],xcedif[5], epsilon, xce[5], xcr[5], dtref; int m; /*-------------------------------------------------------------------- c tolerance level 
c-------------------------------------------------------------------*/ epsilon = 1.0e-08; /*-------------------------------------------------------------------- c compute the error norm and the residual norm, and exit if not printing c-------------------------------------------------------------------*/ error_norm(xce); compute_rhs(); rhs_norm(xcr); for (m = 0; m < 5; m++) { xcr[m] = xcr[m] / dt; } *class = 'U'; *verified = TRUE; for (m = 0; m < 5; m++) { xcrref[m] = 1.0; xceref[m] = 1.0; } /*-------------------------------------------------------------------- c reference data for 12X12X12 grids after 100 time steps, with DT = 1.0d-02 c-------------------------------------------------------------------*/ if (grid_points[0] == 12 && grid_points[1] == 12 && grid_points[2] == 12 && no_time_steps == 60) { *class = 'S'; dtref = 1.0e-2; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 1.7034283709541311e-01; xcrref[1] = 1.2975252070034097e-02; xcrref[2] = 3.2527926989486055e-02; xcrref[3] = 2.6436421275166801e-02; xcrref[4] = 1.9211784131744430e-01; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. 
c-------------------------------------------------------------------*/ xceref[0] = 4.9976913345811579e-04; xceref[1] = 4.5195666782961927e-05; xceref[2] = 7.3973765172921357e-05; xceref[3] = 7.3821238632439731e-05; xceref[4] = 8.9269630987491446e-04; /*-------------------------------------------------------------------- c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 24 && grid_points[1] == 24 && grid_points[2] == 24 && no_time_steps == 200) { *class = 'W'; dtref = 0.8e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 0.1125590409344e+03; xcrref[1] = 0.1180007595731e+02; xcrref[2] = 0.2710329767846e+02; xcrref[3] = 0.2469174937669e+02; xcrref[4] = 0.2638427874317e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 0.4419655736008e+01; xceref[1] = 0.4638531260002e+00; xceref[2] = 0.1011551749967e+01; xceref[3] = 0.9235878729944e+00; xceref[4] = 0.1018045837718e+02; /*-------------------------------------------------------------------- c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 64 && grid_points[1] == 64 && grid_points[2] == 64 && no_time_steps == 200) { *class = 'A'; dtref = 0.8e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. 
c-------------------------------------------------------------------*/ xcrref[0] = 1.0806346714637264e+02; xcrref[1] = 1.1319730901220813e+01; xcrref[2] = 2.5974354511582465e+01; xcrref[3] = 2.3665622544678910e+01; xcrref[4] = 2.5278963211748344e+02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 4.2348416040525025e+00; xceref[1] = 4.4390282496995698e-01; xceref[2] = 9.6692480136345650e-01; xceref[3] = 8.8302063039765474e-01; xceref[4] = 9.7379901770829278e+00; /*-------------------------------------------------------------------- c reference data for 102X102X102 grids after 200 time steps, c with DT = 3.0d-04 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 102 && grid_points[1] == 102 && grid_points[2] == 102 && no_time_steps == 200) { *class = 'B'; dtref = 3.0e-4; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 1.4233597229287254e+03; xcrref[1] = 9.9330522590150238e+01; xcrref[2] = 3.5646025644535285e+02; xcrref[3] = 3.2485447959084092e+02; xcrref[4] = 3.2707541254659363e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. 
c-------------------------------------------------------------------*/ xceref[0] = 5.2969847140936856e+01; xceref[1] = 4.4632896115670668e+00; xceref[2] = 1.3122573342210174e+01; xceref[3] = 1.2006925323559144e+01; xceref[4] = 1.2459576151035986e+02; /*-------------------------------------------------------------------- c reference data for 162X162X162 grids after 200 time steps, c with DT = 1.0d-04 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 162 && grid_points[1] == 162 && grid_points[2] == 162 && no_time_steps == 200) { *class = 'C'; dtref = 1.0e-4; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 0.62398116551764615e+04; xcrref[1] = 0.50793239190423964e+03; xcrref[2] = 0.15423530093013596e+04; xcrref[3] = 0.13302387929291190e+04; xcrref[4] = 0.11604087428436455e+05; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 0.16462008369091265e+03; xceref[1] = 0.11497107903824313e+02; xceref[2] = 0.41207446207461508e+02; xceref[3] = 0.37087651059694167e+02; xceref[4] = 0.36211053051841265e+03; } else { *verified = FALSE; } /*-------------------------------------------------------------------- c verification test for residuals if gridsize is either 12X12X12 or c 64X64X64 or 102X102X102 or 162X162X162 c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Compute the difference of solution values and the known reference values. 
c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]); xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]); } /*-------------------------------------------------------------------- c Output the comparison of computed results to known cases. c-------------------------------------------------------------------*/ if (*class != 'U') { printf(" Verification being performed for class %1c\n", *class); printf(" accuracy setting for epsilon = %20.13e\n", epsilon); if (fabs(dt-dtref) > epsilon) { *verified = FALSE; *class = 'U'; printf(" DT does not match the reference value of %15.8e\n", dtref); } } else { printf(" Unknown class\n"); } if (*class != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { printf(" %2d%20.13e\n", m, xcr[m]); } else if (xcrdif[m] > epsilon) { *verified = FALSE; printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]); } else { printf(" %2d%20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]); } } if (*class != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { printf(" %2d%20.13e\n", m, xce[m]); } else if (xcedif[m] > epsilon) { *verified = FALSE; printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]); } else { printf(" %2d%20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]); } } if (*class == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else if (*verified == TRUE) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_solve(void) { 
/*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c c Performs line solves in X direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix, c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. c c-------------------------------------------------------------------*/ lhsx(); x_solve_cell(); x_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell, then generate U(isize)=rhs[isize) c else assume U(isize) is loaded in un pack backsub_info c so just use it c after call u(istart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; for (i = grid_points[0]-2; i >= 0; i--) { #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_solve_cell(void) { /*-------------------------------------------------------------------- c performs guaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. 
c c assumed send happens outside this routine, but that c c'(IMAX) and rhs'(IMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i,j,k,isize; isize = grid_points[0]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(0,j,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[0][j][k][BB], lhs[0][j][k][CC], rhs[0][j][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (i = 1; i < isize; i++) { #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(i) = rhs(i) - A*rhs(i-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i-1][j][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(i) = B(i) - C(i-1)*A(i) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i-1][j][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for for (j = 1; j < 
grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(isize) = rhs(isize) - A*rhs(isize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[isize][j][k][AA], rhs[isize-1][j][k], rhs[isize][j][k]); /*-------------------------------------------------------------------- c B(isize) = B(isize) - C(isize-1)*A(isize) c-------------------------------------------------------------------*/ matmul_sub(lhs[isize][j][k][AA], lhs[isize-1][j][k][CC], lhs[isize][j][k][BB]); /*-------------------------------------------------------------------- c multiply rhs() by b_inverse() and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][k][BB], rhs[i][j][k] ); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c subtracts bvec=bvec - ablock*avec c-------------------------------------------------------------------*/ int i; for (i = 0; i < 5; i++) { /*-------------------------------------------------------------------- c rhs(i,ic,jc,kc,ccell) = rhs(i,ic,jc,kc,ccell) c $ - lhs[i,1,ablock,ia,ja,ka,acell)* c-------------------------------------------------------------------*/ bvec[i] = bvec[i] - ablock[i][0]*avec[0] - ablock[i][1]*avec[1] - ablock[i][2]*avec[2] - ablock[i][3]*avec[3] - ablock[i][4]*avec[4]; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]) { 
/*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c subtracts a(i,j,k) X b(i,j,k) from c(i,j,k) c-------------------------------------------------------------------*/ int j; for (j = 0; j < 5; j++) { cblock[0][j] = cblock[0][j] - ablock[0][0]*bblock[0][j] - ablock[0][1]*bblock[1][j] - ablock[0][2]*bblock[2][j] - ablock[0][3]*bblock[3][j] - ablock[0][4]*bblock[4][j]; cblock[1][j] = cblock[1][j] - ablock[1][0]*bblock[0][j] - ablock[1][1]*bblock[1][j] - ablock[1][2]*bblock[2][j] - ablock[1][3]*bblock[3][j] - ablock[1][4]*bblock[4][j]; cblock[2][j] = cblock[2][j] - ablock[2][0]*bblock[0][j] - ablock[2][1]*bblock[1][j] - ablock[2][2]*bblock[2][j] - ablock[2][3]*bblock[3][j] - ablock[2][4]*bblock[4][j]; cblock[3][j] = cblock[3][j] - ablock[3][0]*bblock[0][j] - ablock[3][1]*bblock[1][j] - ablock[3][2]*bblock[2][j] - ablock[3][3]*bblock[3][j] - ablock[3][4]*bblock[4][j]; cblock[4][j] = cblock[4][j] - ablock[4][0]*bblock[0][j] - ablock[4][1]*bblock[1][j] - ablock[4][2]*bblock[2][j] - ablock[4][3]*bblock[3][j] - ablock[4][4]*bblock[4][j]; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void binvcrhs(double llhs[5][5], double c[5][5], double r[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ double pivot, coeff; /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ pivot = 1.00/llhs[0][0]; llhs[0][1] = llhs[0][1]*pivot; llhs[0][2] = llhs[0][2]*pivot; llhs[0][3] = llhs[0][3]*pivot; llhs[0][4] = llhs[0][4]*pivot; c[0][0] = c[0][0]*pivot; c[0][1] = c[0][1]*pivot; c[0][2] = c[0][2]*pivot; c[0][3] = c[0][3]*pivot; c[0][4] = 
c[0][4]*pivot; r[0] = r[0] *pivot; coeff = llhs[1][0]; llhs[1][1]= llhs[1][1] - coeff*llhs[0][1]; llhs[1][2]= llhs[1][2] - coeff*llhs[0][2]; llhs[1][3]= llhs[1][3] - coeff*llhs[0][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[0][4]; c[1][0] = c[1][0] - coeff*c[0][0]; c[1][1] = c[1][1] - coeff*c[0][1]; c[1][2] = c[1][2] - coeff*c[0][2]; c[1][3] = c[1][3] - coeff*c[0][3]; c[1][4] = c[1][4] - coeff*c[0][4]; r[1] = r[1] - coeff*r[0]; coeff = llhs[2][0]; llhs[2][1]= llhs[2][1] - coeff*llhs[0][1]; llhs[2][2]= llhs[2][2] - coeff*llhs[0][2]; llhs[2][3]= llhs[2][3] - coeff*llhs[0][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[0][4]; c[2][0] = c[2][0] - coeff*c[0][0]; c[2][1] = c[2][1] - coeff*c[0][1]; c[2][2] = c[2][2] - coeff*c[0][2]; c[2][3] = c[2][3] - coeff*c[0][3]; c[2][4] = c[2][4] - coeff*c[0][4]; r[2] = r[2] - coeff*r[0]; coeff = llhs[3][0]; llhs[3][1]= llhs[3][1] - coeff*llhs[0][1]; llhs[3][2]= llhs[3][2] - coeff*llhs[0][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[0][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[0][4]; c[3][0] = c[3][0] - coeff*c[0][0]; c[3][1] = c[3][1] - coeff*c[0][1]; c[3][2] = c[3][2] - coeff*c[0][2]; c[3][3] = c[3][3] - coeff*c[0][3]; c[3][4] = c[3][4] - coeff*c[0][4]; r[3] = r[3] - coeff*r[0]; coeff = llhs[4][0]; llhs[4][1]= llhs[4][1] - coeff*llhs[0][1]; llhs[4][2]= llhs[4][2] - coeff*llhs[0][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[0][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[0][4]; c[4][0] = c[4][0] - coeff*c[0][0]; c[4][1] = c[4][1] - coeff*c[0][1]; c[4][2] = c[4][2] - coeff*c[0][2]; c[4][3] = c[4][3] - coeff*c[0][3]; c[4][4] = c[4][4] - coeff*c[0][4]; r[4] = r[4] - coeff*r[0]; pivot = 1.00/llhs[1][1]; llhs[1][2] = llhs[1][2]*pivot; llhs[1][3] = llhs[1][3]*pivot; llhs[1][4] = llhs[1][4]*pivot; c[1][0] = c[1][0]*pivot; c[1][1] = c[1][1]*pivot; c[1][2] = c[1][2]*pivot; c[1][3] = c[1][3]*pivot; c[1][4] = c[1][4]*pivot; r[1] = r[1] *pivot; coeff = llhs[0][1]; llhs[0][2]= llhs[0][2] - coeff*llhs[1][2]; llhs[0][3]= llhs[0][3] - coeff*llhs[1][3]; llhs[0][4]= llhs[0][4] 
- coeff*llhs[1][4]; c[0][0] = c[0][0] - coeff*c[1][0]; c[0][1] = c[0][1] - coeff*c[1][1]; c[0][2] = c[0][2] - coeff*c[1][2]; c[0][3] = c[0][3] - coeff*c[1][3]; c[0][4] = c[0][4] - coeff*c[1][4]; r[0] = r[0] - coeff*r[1]; coeff = llhs[2][1]; llhs[2][2]= llhs[2][2] - coeff*llhs[1][2]; llhs[2][3]= llhs[2][3] - coeff*llhs[1][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[1][4]; c[2][0] = c[2][0] - coeff*c[1][0]; c[2][1] = c[2][1] - coeff*c[1][1]; c[2][2] = c[2][2] - coeff*c[1][2]; c[2][3] = c[2][3] - coeff*c[1][3]; c[2][4] = c[2][4] - coeff*c[1][4]; r[2] = r[2] - coeff*r[1]; coeff = llhs[3][1]; llhs[3][2]= llhs[3][2] - coeff*llhs[1][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[1][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[1][4]; c[3][0] = c[3][0] - coeff*c[1][0]; c[3][1] = c[3][1] - coeff*c[1][1]; c[3][2] = c[3][2] - coeff*c[1][2]; c[3][3] = c[3][3] - coeff*c[1][3]; c[3][4] = c[3][4] - coeff*c[1][4]; r[3] = r[3] - coeff*r[1]; coeff = llhs[4][1]; llhs[4][2]= llhs[4][2] - coeff*llhs[1][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[1][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[1][4]; c[4][0] = c[4][0] - coeff*c[1][0]; c[4][1] = c[4][1] - coeff*c[1][1]; c[4][2] = c[4][2] - coeff*c[1][2]; c[4][3] = c[4][3] - coeff*c[1][3]; c[4][4] = c[4][4] - coeff*c[1][4]; r[4] = r[4] - coeff*r[1]; pivot = 1.00/llhs[2][2]; llhs[2][3] = llhs[2][3]*pivot; llhs[2][4] = llhs[2][4]*pivot; c[2][0] = c[2][0]*pivot; c[2][1] = c[2][1]*pivot; c[2][2] = c[2][2]*pivot; c[2][3] = c[2][3]*pivot; c[2][4] = c[2][4]*pivot; r[2] = r[2] *pivot; coeff = llhs[0][2]; llhs[0][3]= llhs[0][3] - coeff*llhs[2][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[2][4]; c[0][0] = c[0][0] - coeff*c[2][0]; c[0][1] = c[0][1] - coeff*c[2][1]; c[0][2] = c[0][2] - coeff*c[2][2]; c[0][3] = c[0][3] - coeff*c[2][3]; c[0][4] = c[0][4] - coeff*c[2][4]; r[0] = r[0] - coeff*r[2]; coeff = llhs[1][2]; llhs[1][3]= llhs[1][3] - coeff*llhs[2][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[2][4]; c[1][0] = c[1][0] - coeff*c[2][0]; c[1][1] = c[1][1] - coeff*c[2][1]; c[1][2] = 
c[1][2] - coeff*c[2][2]; c[1][3] = c[1][3] - coeff*c[2][3]; c[1][4] = c[1][4] - coeff*c[2][4]; r[1] = r[1] - coeff*r[2]; coeff = llhs[3][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[2][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[2][4]; c[3][0] = c[3][0] - coeff*c[2][0]; c[3][1] = c[3][1] - coeff*c[2][1]; c[3][2] = c[3][2] - coeff*c[2][2]; c[3][3] = c[3][3] - coeff*c[2][3]; c[3][4] = c[3][4] - coeff*c[2][4]; r[3] = r[3] - coeff*r[2]; coeff = llhs[4][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[2][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[2][4]; c[4][0] = c[4][0] - coeff*c[2][0]; c[4][1] = c[4][1] - coeff*c[2][1]; c[4][2] = c[4][2] - coeff*c[2][2]; c[4][3] = c[4][3] - coeff*c[2][3]; c[4][4] = c[4][4] - coeff*c[2][4]; r[4] = r[4] - coeff*r[2]; pivot = 1.00/llhs[3][3]; llhs[3][4] = llhs[3][4]*pivot; c[3][0] = c[3][0]*pivot; c[3][1] = c[3][1]*pivot; c[3][2] = c[3][2]*pivot; c[3][3] = c[3][3]*pivot; c[3][4] = c[3][4]*pivot; r[3] = r[3] *pivot; coeff = llhs[0][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[3][4]; c[0][0] = c[0][0] - coeff*c[3][0]; c[0][1] = c[0][1] - coeff*c[3][1]; c[0][2] = c[0][2] - coeff*c[3][2]; c[0][3] = c[0][3] - coeff*c[3][3]; c[0][4] = c[0][4] - coeff*c[3][4]; r[0] = r[0] - coeff*r[3]; coeff = llhs[1][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[3][4]; c[1][0] = c[1][0] - coeff*c[3][0]; c[1][1] = c[1][1] - coeff*c[3][1]; c[1][2] = c[1][2] - coeff*c[3][2]; c[1][3] = c[1][3] - coeff*c[3][3]; c[1][4] = c[1][4] - coeff*c[3][4]; r[1] = r[1] - coeff*r[3]; coeff = llhs[2][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[3][4]; c[2][0] = c[2][0] - coeff*c[3][0]; c[2][1] = c[2][1] - coeff*c[3][1]; c[2][2] = c[2][2] - coeff*c[3][2]; c[2][3] = c[2][3] - coeff*c[3][3]; c[2][4] = c[2][4] - coeff*c[3][4]; r[2] = r[2] - coeff*r[3]; coeff = llhs[4][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[3][4]; c[4][0] = c[4][0] - coeff*c[3][0]; c[4][1] = c[4][1] - coeff*c[3][1]; c[4][2] = c[4][2] - coeff*c[3][2]; c[4][3] = c[4][3] - coeff*c[3][3]; c[4][4] = c[4][4] - coeff*c[3][4]; r[4] = r[4] - coeff*r[3]; pivot = 
1.00/llhs[4][4]; c[4][0] = c[4][0]*pivot; c[4][1] = c[4][1]*pivot; c[4][2] = c[4][2]*pivot; c[4][3] = c[4][3]*pivot; c[4][4] = c[4][4]*pivot; r[4] = r[4] *pivot; coeff = llhs[0][4]; c[0][0] = c[0][0] - coeff*c[4][0]; c[0][1] = c[0][1] - coeff*c[4][1]; c[0][2] = c[0][2] - coeff*c[4][2]; c[0][3] = c[0][3] - coeff*c[4][3]; c[0][4] = c[0][4] - coeff*c[4][4]; r[0] = r[0] - coeff*r[4]; coeff = llhs[1][4]; c[1][0] = c[1][0] - coeff*c[4][0]; c[1][1] = c[1][1] - coeff*c[4][1]; c[1][2] = c[1][2] - coeff*c[4][2]; c[1][3] = c[1][3] - coeff*c[4][3]; c[1][4] = c[1][4] - coeff*c[4][4]; r[1] = r[1] - coeff*r[4]; coeff = llhs[2][4]; c[2][0] = c[2][0] - coeff*c[4][0]; c[2][1] = c[2][1] - coeff*c[4][1]; c[2][2] = c[2][2] - coeff*c[4][2]; c[2][3] = c[2][3] - coeff*c[4][3]; c[2][4] = c[2][4] - coeff*c[4][4]; r[2] = r[2] - coeff*r[4]; coeff = llhs[3][4]; c[3][0] = c[3][0] - coeff*c[4][0]; c[3][1] = c[3][1] - coeff*c[4][1]; c[3][2] = c[3][2] - coeff*c[4][2]; c[3][3] = c[3][3] - coeff*c[4][3]; c[3][4] = c[3][4] - coeff*c[4][4]; r[3] = r[3] - coeff*r[4]; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void binvrhs( double llhs[5][5], double r[5] ) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ double pivot, coeff; /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ pivot = 1.00/llhs[0][0]; llhs[0][1] = llhs[0][1]*pivot; llhs[0][2] = llhs[0][2]*pivot; llhs[0][3] = llhs[0][3]*pivot; llhs[0][4] = llhs[0][4]*pivot; r[0] = r[0] *pivot; coeff = llhs[1][0]; llhs[1][1]= llhs[1][1] - coeff*llhs[0][1]; llhs[1][2]= llhs[1][2] - coeff*llhs[0][2]; llhs[1][3]= llhs[1][3] - coeff*llhs[0][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[0][4]; r[1] = r[1] - coeff*r[0]; coeff = llhs[2][0]; llhs[2][1]= llhs[2][1] - 
coeff*llhs[0][1]; llhs[2][2]= llhs[2][2] - coeff*llhs[0][2]; llhs[2][3]= llhs[2][3] - coeff*llhs[0][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[0][4]; r[2] = r[2] - coeff*r[0]; coeff = llhs[3][0]; llhs[3][1]= llhs[3][1] - coeff*llhs[0][1]; llhs[3][2]= llhs[3][2] - coeff*llhs[0][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[0][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[0][4]; r[3] = r[3] - coeff*r[0]; coeff = llhs[4][0]; llhs[4][1]= llhs[4][1] - coeff*llhs[0][1]; llhs[4][2]= llhs[4][2] - coeff*llhs[0][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[0][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[0][4]; r[4] = r[4] - coeff*r[0]; pivot = 1.00/llhs[1][1]; llhs[1][2] = llhs[1][2]*pivot; llhs[1][3] = llhs[1][3]*pivot; llhs[1][4] = llhs[1][4]*pivot; r[1] = r[1] *pivot; coeff = llhs[0][1]; llhs[0][2]= llhs[0][2] - coeff*llhs[1][2]; llhs[0][3]= llhs[0][3] - coeff*llhs[1][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[1][4]; r[0] = r[0] - coeff*r[1]; coeff = llhs[2][1]; llhs[2][2]= llhs[2][2] - coeff*llhs[1][2]; llhs[2][3]= llhs[2][3] - coeff*llhs[1][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[1][4]; r[2] = r[2] - coeff*r[1]; coeff = llhs[3][1]; llhs[3][2]= llhs[3][2] - coeff*llhs[1][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[1][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[1][4]; r[3] = r[3] - coeff*r[1]; coeff = llhs[4][1]; llhs[4][2]= llhs[4][2] - coeff*llhs[1][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[1][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[1][4]; r[4] = r[4] - coeff*r[1]; pivot = 1.00/llhs[2][2]; llhs[2][3] = llhs[2][3]*pivot; llhs[2][4] = llhs[2][4]*pivot; r[2] = r[2] *pivot; coeff = llhs[0][2]; llhs[0][3]= llhs[0][3] - coeff*llhs[2][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[2][4]; r[0] = r[0] - coeff*r[2]; coeff = llhs[1][2]; llhs[1][3]= llhs[1][3] - coeff*llhs[2][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[2][4]; r[1] = r[1] - coeff*r[2]; coeff = llhs[3][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[2][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[2][4]; r[3] = r[3] - coeff*r[2]; coeff = llhs[4][2]; llhs[4][3]= llhs[4][3] - 
coeff*llhs[2][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[2][4]; r[4] = r[4] - coeff*r[2]; pivot = 1.00/llhs[3][3]; llhs[3][4] = llhs[3][4]*pivot; r[3] = r[3] *pivot; coeff = llhs[0][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[3][4]; r[0] = r[0] - coeff*r[3]; coeff = llhs[1][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[3][4]; r[1] = r[1] - coeff*r[3]; coeff = llhs[2][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[3][4]; r[2] = r[2] - coeff*r[3]; coeff = llhs[4][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[3][4]; r[4] = r[4] - coeff*r[3]; pivot = 1.00/llhs[4][4]; r[4] = r[4] *pivot; coeff = llhs[0][4]; r[0] = r[0] - coeff*r[4]; coeff = llhs[1][4]; r[1] = r[1] - coeff*r[4]; coeff = llhs[2][4]; r[2] = r[2] - coeff*r[4]; coeff = llhs[3][4]; r[3] = r[3] - coeff*r[4]; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs line solves in Y direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix][ c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. 
c-------------------------------------------------------------------*/

  lhsy();
  y_solve_cell();
  y_backsubstitute();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void y_backsubstitute(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     back solve: if last cell, then generate U(jsize)=rhs(jsize)
c     else assume U(jsize) is loaded in un pack backsub_info
c     so just use it
c     after call u(jstart) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, m, n;

  /* sweep j from the top row downward; row j consumes the already
     solved row j+1: rhs[i][j] -= CC * rhs[i][j+1] */
  for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < BLOCK_SIZE; m++) {
	  for (n = 0; n < BLOCK_SIZE; n++) {
	    rhs[i][j][k][m] = rhs[i][j][k][m]
	      - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];
	  }
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void y_solve_cell(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     performs Gaussian elimination on this cell.
c
c     assumes that unpacking routines for non-first cells
c     preload C' and rhs' from previous cell.
c
c     assumed send happens outside this routine, but that
c     c'(JMAX) and rhs'(JMAX) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, jsize;

  jsize = grid_points[1]-1;

/*--------------------------------------------------------------------
c     multiply c(i,0,k) by b_inverse and copy back to c
c     multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      binvcrhs( lhs[i][0][k][BB],
		lhs[i][0][k][CC],
		rhs[i][0][k] );
    }
  }

/*--------------------------------------------------------------------
c     begin inner most do loop
c     do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (j = 1; j < jsize; j++) {
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c     subtract A*lhs_vector(j-1) from lhs_vector(j)
c
c     rhs(j) = rhs(j) - A*rhs(j-1)
c-------------------------------------------------------------------*/
	matvec_sub(lhs[i][j][k][AA], rhs[i][j-1][k], rhs[i][j][k]);

/*--------------------------------------------------------------------
c     B(j) = B(j) - C(j-1)*A(j)
c-------------------------------------------------------------------*/
	matmul_sub(lhs[i][j][k][AA], lhs[i][j-1][k][CC], lhs[i][j][k][BB]);

/*--------------------------------------------------------------------
c     multiply c(i,j,k) by b_inverse and copy back to c
c     multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs
c-------------------------------------------------------------------*/
	binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] );
      }
    }
  }

/*--------------------------------------------------------------------
c     Now finish up special cases for the last cell (row j == jsize)
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c     rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[i][jsize][k][AA], rhs[i][jsize-1][k], rhs[i][jsize][k]);

/*--------------------------------------------------------------------
c     B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
c     call matmul_sub(aa,i,jsize,k,c,
c     $              cc,i,jsize-1,k,c,BB,i,jsize,k)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[i][jsize][k][AA], lhs[i][jsize-1][k][CC],
		 lhs[i][jsize][k][BB]);

/*--------------------------------------------------------------------
c     multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
c-------------------------------------------------------------------*/
      binvrhs( lhs[i][jsize][k][BB], rhs[i][jsize][k] );
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_solve(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     Performs line solves in Z direction by first factoring
c     the block-tridiagonal matrix into an upper triangular matrix,
c     and then performing back substitution to solve for the unknown
c     vectors of each line.
c
c     Make sure we treat elements zero to cell_size in the direction
c     of the sweep.
c-------------------------------------------------------------------*/

  lhsz();
  z_solve_cell();
  z_backsubstitute();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_backsubstitute(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     back solve: if last cell, then generate U(ksize)=rhs(ksize)
c     else assume U(ksize) is loaded in un pack backsub_info
c     so just use it
c     after call u(kstart) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, m, n;

#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      /* sweep k from the top plane downward; plane k consumes the
	 already solved plane k+1: rhs[.][k] -= CC * rhs[.][k+1] */
      for (k = grid_points[2]-2; k >= 0; k--) {
	for (m = 0; m < BLOCK_SIZE; m++) {
	  for (n = 0; n < BLOCK_SIZE; n++) {
	    rhs[i][j][k][m] = rhs[i][j][k][m]
	      - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];
	  }
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_solve_cell(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     performs Gaussian elimination on this cell.
c
c     assumes that unpacking routines for non-first cells
c     preload C' and rhs' from previous cell.
c
c     assumed send happens outside this routine, but that
c     c'(KMAX) and rhs'(KMAX) will be sent to next cell.
c-------------------------------------------------------------------*/

  int i, j, k, ksize;

  ksize = grid_points[2]-1;

/*--------------------------------------------------------------------
c     outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {

/*--------------------------------------------------------------------
c     multiply c(i,j,0) by b_inverse and copy back to c
c     multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[i][j][0][BB],
		lhs[i][j][0][CC],
		rhs[i][j][0] );
    }
  }

/*--------------------------------------------------------------------
c     begin inner most do loop
c     do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (k = 1; k < ksize; k++) {
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
      for (j = 1; j < grid_points[1]-1; j++) {

/*--------------------------------------------------------------------
c     subtract A*lhs_vector(k-1) from lhs_vector(k)
c
c     rhs(k) = rhs(k) - A*rhs(k-1)
c-------------------------------------------------------------------*/
	matvec_sub(lhs[i][j][k][AA], rhs[i][j][k-1], rhs[i][j][k]);

/*--------------------------------------------------------------------
c     B(k) = B(k) - C(k-1)*A(k)
c     call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k)
c-------------------------------------------------------------------*/
	matmul_sub(lhs[i][j][k][AA], lhs[i][j][k-1][CC], lhs[i][j][k][BB]);

/*--------------------------------------------------------------------
c     multiply c(i,j,k) by b_inverse and copy back to c
c     multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs
c-------------------------------------------------------------------*/
	binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] );
      }
    }
  }

/*--------------------------------------------------------------------
c     Now finish up special cases for last cell
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {

/*--------------------------------------------------------------------
c     rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[i][j][ksize][AA], rhs[i][j][ksize-1], rhs[i][j][ksize]);

/*--------------------------------------------------------------------
c     B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
c     call matmul_sub(aa,i,j,ksize,c,
c     $              cc,i,j,ksize-1,c,BB,i,j,ksize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[i][j][ksize][AA], lhs[i][j][ksize-1][CC],
		 lhs[i][j][ksize][BB]);

/*--------------------------------------------------------------------
c     multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
c-------------------------------------------------------------------*/
      binvrhs( lhs[i][j][ksize][BB], rhs[i][j][ksize] );
    }
  }
}